author    Linus Torvalds <torvalds@linux-foundation.org>    2016-10-05 10:11:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-10-05 10:11:24 -0700
commit    687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree      b31a2af35c24a54823674cdd126993b80daeac67 /drivers/net/ethernet
parent    mm: filemap: fix mapping->nrpages double accounting in fuse (diff)
parent    Merge branch 'mlxsw-fixes' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and co. at Google. https://lwn.net/Articles/701165/
 2) Do TCP Small Queues for retransmits, from Eric Dumazet.
 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei Starovoitov.
 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.
 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.
 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.
 7) Support ndo_poll_controller in mlx5, from Calvin Owens.
 8) Move VRF processing to an output hook and allow l3mdev to be loopback, from David Ahern.
 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.
10) Congestion control in RXRPC, from David Howells.
11) Support geneve RX offload in ixgbe, from Emil Tantilov.
12) When hitting pressure for new incoming TCP data SKBs, perform a partial rather than a full purge of the OFO queue (which could be huge). From Eric Dumazet.
13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.
14) Support RX network flow classification in igb, from Gangfeng Huang.
15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.
16) New skbmod packet action, from Jamal Hadi Salim.
17) Remove some inefficiencies in snmp proc output, from Jia He.
18) Add FIB notifications to properly propagate route changes to hardware which is doing forwarding offloading. From Jiri Pirko.
19) New dsa driver for qca8xxx chips, from John Crispin.
20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej Żenczykowski.
21) Add L3 mode to ipvlan, from Mahesh Bandewar.
22) Support 802.1ad in mlx4, from Moshe Shemesh.
23) Support hardware LRO in mediatek driver, from Nelson Chang.
24) Add TC offloading to mlx5, from Or Gerlitz.
25) Convert various drivers to ethtool ksettings interfaces, from Philippe Reynes.
26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.
27) NAPI support for ath10k, from Rajkumar Manoharan.
28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.
29) UDP replicast support in TIPC, from Richard Alpe.
30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.
31) Support BQL in thunderx driver, from Sunil Goutham.
32) TSO support in alx driver, from Tobias Regnery.
33) Add stream parser engine and use it in kcm.
34) Support async DHCP replies in ipconfig module, from Uwe Kleine-König.
35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
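Note on item 1: BBR ships as a pluggable congestion control module (CONFIG_TCP_CONG_BBR). Assuming it is built and permitted by net.ipv4.tcp_allowed_congestion_control, an application can opt in per socket through the long-standing TCP_CONGESTION socket option; a minimal userspace sketch:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Ask the kernel to run "bbr" on one TCP socket.
     * setsockopt() fails with ENOENT if the module is unavailable. */
    static int use_bbr(int fd)
    {
            static const char name[] = "bbr";

            return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                              name, sizeof(name) - 1);
    }

System-wide default selection goes through the net.ipv4.tcp_congestion_control sysctl instead.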
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c  2
-rw-r--r--  drivers/net/ethernet/Kconfig  1
-rw-r--r--  drivers/net/ethernet/Makefile  1
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c  2
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c  8
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c  42
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.h  1
-rw-r--r--  drivers/net/ethernet/agere/et131x.c  2
-rw-r--r--  drivers/net/ethernet/amazon/Kconfig  27
-rw-r--r--  drivers/net/ethernet/amazon/Makefile  5
-rw-r--r--  drivers/net/ethernet/amazon/ena/Makefile  7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h  973
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c  2666
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h  1038
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_common_defs.h  48
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c  501
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h  160
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h  416
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c  895
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c  3272
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h  324
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h  67
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h  133
-rw-r--r--  drivers/net/ethernet/amd/7990.c  6
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c  2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c  4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c  4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c  8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h  5
-rw-r--r--  drivers/net/ethernet/apm/xgene/Kconfig  1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_cle.c  17
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_cle.h  10
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c  65
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c  38
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h  13
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c  171
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h  11
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c  77
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h  4
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c  2
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h  10
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.c  14
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.h  1
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c  314
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c  116
-rw-r--r--  drivers/net/ethernet/broadcom/b44.h  1
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c  79
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h  1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c  4
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c  19
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c  28
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h  19
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  131
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c  276
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h  12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c  33
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h  1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  135
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  187
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h  16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h  1251
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  90
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h  2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  140
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  112
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c  27
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h  1
-rw-r--r--  drivers/net/ethernet/cadence/macb.c  97
-rw-r--r--  drivers/net/ethernet/cadence/macb.h  14
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig  12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/Makefile  24
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c  1237
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h  59
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h  604
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.c  45
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h  7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c  1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c  266
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c  513
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c  1128
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h  34
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_config.h  59
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c  117
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c  352
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h  114
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c  46
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h  2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h  2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h  32
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c  1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h  12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.c  35
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h  6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c  170
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c  9
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile  1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h  87
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c  433
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h  15
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  77
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  89
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h  5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  460
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h  33
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c  235
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  193
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c  135
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c  721
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h  48
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  1390
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c  483
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h  57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h  294
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c  696
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c  556
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.h  110
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  74
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h  437
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h  164
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c  26
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c  7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h  3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c  63
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/Makefile  4
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c  149
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h  160
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c  4
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.h  4
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c  2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h  92
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c  277
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h  45
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c  40
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h  7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  767
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c  99
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.h  8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  18
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_mac.h  2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c  344
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h  16
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c  59
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c  59
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c  59
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c  8
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c  2
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c  6
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c  100
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h  30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c  36
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h  12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c  23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c  134
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c  4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c  10
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c  5
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  57
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h  2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c  2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h  12
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.c  3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.h  4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c  3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c  10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c  30
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c  193
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c  30
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c  46
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h  1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h  142
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h  59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c  39
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h  6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c  87
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c  345
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  299
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c  193
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h  9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h  1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h  4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h  59
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c  3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c  64
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h  17
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h  1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h  65
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c  232
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c  41
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  65
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h  5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h  4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h  1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h  51
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  345
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  56
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c  11
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  33
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  244
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h  11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c  82
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h  11
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c  21
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c  3
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  66
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.h  2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c  2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  931
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h  155
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c  81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c  131
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  290
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c  109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c  50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c  345
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  177
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c  23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  882
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  638
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h  32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c  67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  397
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  222
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c  232
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  118
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c  43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c  588
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mad.c  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  479
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c  72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c  189
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  167
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pd.c  61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  181
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c  299
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c  226
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c  49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c  183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  94
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h  42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c  135
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c  724
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  478
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c  141
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c  6
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile  7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h  233
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_bpf.h  202
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c  1813
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c  171
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h  47
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  134
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h  51
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c  12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_offload.c  294
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c  2
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig  14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile  4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h  71
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c  75
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h  7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c  24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c  6898
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.h  54
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c  489
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h  20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h  2500
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c  149
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c  99
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c  153
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c  259
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h  7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c  1792
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h  316
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c  239
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c  234
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h  96
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h  934
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c  2954
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.h  216
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.c  1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h  5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c  15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c  131
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c  237
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c  249
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h  7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile  1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h  48
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c  314
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c  518
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_roce.c  314
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c  7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h  2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c  5
-rw-r--r--  drivers/net/ethernet/qualcomm/Kconfig  12
-rw-r--r--  drivers/net/ethernet/qualcomm/Makefile  2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/Makefile  7
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c  1528
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.h  248
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.c  227
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.h  33
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.c  784
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.h  24
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c  755
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.h  335
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c  6
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig  2
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h  1
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  123
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  45
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h  1
-rw-r--r--  drivers/net/ethernet/rocker/rocker.h  15
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c  122
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c  119
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  247
-rw-r--r--  drivers/net/ethernet/sfc/efx.c  108
-rw-r--r--  drivers/net/ethernet/sfc/efx.h  2
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c  9
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c  4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c  3
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c  8
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h  530
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  17
-rw-r--r--  drivers/net/ethernet/sfc/nic.c  4
-rw-r--r--  drivers/net/ethernet/sfc/nic.h  9
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c  16
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c  10
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h  2
-rw-r--r--  drivers/net/ethernet/sfc/siena.c  14
-rw-r--r--  drivers/net/ethernet/sfc/sriov.c  5
-rw-r--r--  drivers/net/ethernet/sfc/sriov.h  2
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h  4
-rw-r--r--  drivers/net/ethernet/sis/sis900.c  4
-rw-r--r--  drivers/net/ethernet/sis/sis900.h  2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c  3
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c  9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig  12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c  254
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c  194
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c  2
-rw-r--r--  drivers/net/ethernet/synopsys/dwc_eth_qos.c  4
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c  5
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  1306
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c  91
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h  13
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c  22
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c  4
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c  21
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig  4
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c  2
405 files changed, 60217 insertions(+), 11248 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 25c55ab05c7d..9133e7926da5 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3089,7 +3089,7 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(new_mode, ioaddr + EL3_CMD);
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
Note that this must be done after each RxReset due to some backwards
compatibility logic in the Cyclone and Tornado ASICs */
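The hunk above (and the identical one in starfire further down) is mechanical: IS_ENABLED() from <linux/kconfig.h> is true when an option is =y or =m, which is exactly what the old two-macro test spelled out by hand. Sketched side by side:

    #include <linux/kconfig.h>

    /* Old form: built-in and modular cases tested separately. */
    #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
    /* ... VLAN support ... */
    #endif

    /* New form: one test covers both =y and =m; IS_BUILTIN() and
     * IS_MODULE() remain available when only one case is wanted. */
    #if IS_ENABLED(CONFIG_VLAN_8021Q)
    /* ... VLAN support ... */
    #endif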
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2ffd63463299..8cc7467b6c1f 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -24,6 +24,7 @@ source "drivers/net/ethernet/agere/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
source "drivers/net/ethernet/altera/Kconfig"
+source "drivers/net/ethernet/amazon/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1d349e9aa9a6..a09423df83f2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_ALTERA_TSE) += altera/
+obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 1d1069641d81..8af2c88d5b33 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -66,7 +66,7 @@
*/
#define ZEROCOPY
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 38eaea18da23..00f9ee3fc3e5 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -192,8 +192,8 @@ static int desc_list_init(struct net_device *dev)
goto init_error;
skb_reserve(new_skb, NET_IP_ALIGN);
- /* Invidate the data cache of skb->data range when it is write back
- * cache. It will prevent overwritting the new data from DMA
+ /* Invalidate the data cache of skb->data range when it is write back
+ * cache. It will prevent overwriting the new data from DMA
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
(unsigned long)new_skb->end);
@@ -1205,7 +1205,7 @@ static void bfin_mac_rx(struct bfin_mac_local *lp)
}
/* reserve 2 bytes for RXDWA padding */
skb_reserve(new_skb, NET_IP_ALIGN);
- /* Invidate the data cache of skb->data range when it is write back
+ /* Invalidate the data cache of skb->data range when it is write back
* cache. It will prevent overwritting the new data from DMA
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
@@ -1599,7 +1599,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
/* probe mac */
- /*todo: how to proble? which is revision_register */
+ /*todo: how to probe? which is revision_register */
bfin_write_EMAC_ADDRLO(0x12345678);
if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index bca07c5c94bd..f8df8248035e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1105,27 +1105,6 @@ static void greth_set_msglevel(struct net_device *dev, u32 value)
struct greth_private *greth = netdev_priv(dev);
greth->msg_enable = value;
}
-static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct greth_private *greth = netdev_priv(dev);
- struct phy_device *phy = greth->phy;
-
- if (!phy)
- return -ENODEV;
-
- return phy_ethtool_gset(phy, cmd);
-}
-
-static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct greth_private *greth = netdev_priv(dev);
- struct phy_device *phy = greth->phy;
-
- if (!phy)
- return -ENODEV;
-
- return phy_ethtool_sset(phy, cmd);
-}
static int greth_get_regs_len(struct net_device *dev)
{
@@ -1157,12 +1136,12 @@ static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, vo
static const struct ethtool_ops greth_ethtool_ops = {
.get_msglevel = greth_get_msglevel,
.set_msglevel = greth_set_msglevel,
- .get_settings = greth_get_settings,
- .set_settings = greth_set_settings,
.get_drvinfo = greth_get_drvinfo,
.get_regs_len = greth_get_regs_len,
.get_regs = greth_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static struct net_device_ops greth_netdev_ops = {
@@ -1224,7 +1203,7 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
static void greth_link_change(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
- struct phy_device *phydev = greth->phy;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
u32 ctrl;
@@ -1307,7 +1286,6 @@ static int greth_mdio_probe(struct net_device *dev)
greth->link = 0;
greth->speed = 0;
greth->duplex = -1;
- greth->phy = phy;
return 0;
}
@@ -1325,6 +1303,7 @@ static int greth_mdio_init(struct greth_private *greth)
{
int ret;
unsigned long timeout;
+ struct net_device *ndev = greth->netdev;
greth->mdio = mdiobus_alloc();
if (!greth->mdio) {
@@ -1349,15 +1328,16 @@ static int greth_mdio_init(struct greth_private *greth)
goto unreg_mdio;
}
- phy_start(greth->phy);
+ phy_start(ndev->phydev);
/* If Ethernet debug link is used make autoneg happen right away */
if (greth->edcl && greth_edcl == 1) {
- phy_start_aneg(greth->phy);
+ phy_start_aneg(ndev->phydev);
timeout = jiffies + 6*HZ;
- while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
+ while (!phy_aneg_done(ndev->phydev) &&
+ time_before(jiffies, timeout)) {
}
- phy_read_status(greth->phy);
+ phy_read_status(ndev->phydev);
greth_link_change(greth->netdev);
}
@@ -1569,8 +1549,8 @@ static int greth_of_remove(struct platform_device *of_dev)
dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
- if (greth->phy)
- phy_stop(greth->phy);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
mdiobus_unregister(greth->mdio);
unregister_netdev(ndev);
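The greth.c hunks above are one instance of the ksettings conversion called out in item 25 of the merge message: the driver drops its hand-rolled get_settings/set_settings wrappers and its cached struct phy_device pointer, and instead points the new hooks at phylib helpers that operate on dev->phydev. Stripped of driver specifics, the resulting pattern is a sketch like this (the ops name here is illustrative):

    #include <linux/ethtool.h>
    #include <linux/phy.h>

    /* For a MAC whose PHY is managed by phylib, the generic helpers
     * replace per-driver ethtool_cmd plumbing entirely. */
    static const struct ethtool_ops example_ethtool_ops = {
            .get_link           = ethtool_op_get_link,
            .get_link_ksettings = phy_ethtool_get_link_ksettings,
            .set_link_ksettings = phy_ethtool_set_link_ksettings,
    };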
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 92dd918e4a83..9c07140a5d8d 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -123,7 +123,6 @@ struct greth_private {
struct napi_struct napi;
spinlock_t devlock;
- struct phy_device *phy;
struct mii_bus *mdio;
unsigned int link;
unsigned int speed;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index c83ebae73d91..906683851c7d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2961,7 +2961,7 @@ static void et131x_get_drvinfo(struct net_device *netdev,
sizeof(info->bus_info));
}
-static struct ethtool_ops et131x_ethtool_ops = {
+static const struct ethtool_ops et131x_ethtool_ops = {
.get_drvinfo = et131x_get_drvinfo,
.get_regs_len = et131x_get_regs_len,
.get_regs = et131x_get_regs,
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
new file mode 100644
index 000000000000..99b30353541a
--- /dev/null
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -0,0 +1,27 @@
+#
+# Amazon network device configuration
+#
+
+config NET_VENDOR_AMAZON
+ bool "Amazon Devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Amazon devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_AMAZON
+
+config ENA_ETHERNET
+ tristate "Elastic Network Adapter (ENA) support"
+ depends on (PCI_MSI && X86)
+ ---help---
+ This driver supports Elastic Network Adapter (ENA)
+
+ To compile this driver as a module, choose M here.
+ The module will be called ena.
+
+endif #NET_VENDOR_AMAZON
diff --git a/drivers/net/ethernet/amazon/Makefile b/drivers/net/ethernet/amazon/Makefile
new file mode 100644
index 000000000000..8e0b73f60d51
--- /dev/null
+++ b/drivers/net/ethernet/amazon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Amazon network device drivers.
+#
+
+obj-$(CONFIG_ENA_ETHERNET) += ena/
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
new file mode 100644
index 000000000000..eaeeae06c5d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Elastic Network Adapter (ENA) device drivers.
+#
+
+obj-$(CONFIG_ENA_ETHERNET) += ena.o
+
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
new file mode 100644
index 000000000000..a46e749bf226
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -0,0 +1,973 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+enum ena_admin_aq_completion_status {
+ ENA_ADMIN_SUCCESS = 0,
+
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+enum ena_admin_aq_feature_id {
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ ENA_ADMIN_MTU = 14,
+
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers are in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+struct ena_admin_aq_common_desc {
+ /* 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command_id;
+
+ /* as appears in ena_admin_aq_opcode */
+ u8 opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ u8 flags;
+};
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ u32 length;
+
+ struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+ u16 sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ u8 sq_identity;
+
+ u8 reserved1;
+};
+
+struct ena_admin_aq_entry {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ u32 inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ u32 inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command;
+
+ u8 status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ u8 flags;
+
+ u16 extended_status;
+
+ /* serves as a hint what AQ entries can be revoked */
+ u16 sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ u32 response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ u8 sq_identity;
+
+ u8 reserved8_w1;
+
+ /* 3:0 : placement_policy - Describing where the SQ
+ * descriptor ring and the SQ packet headers reside:
+ * 0x1 - descriptors and headers are in OS memory,
+ * 0x3 - descriptors and headers in device memory
+ * (a.k.a Low Latency Queue)
+ * 6:4 : completion_policy - Describing what policy
+ * to use for generation completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ u8 sq_caps_2;
+
+ /* 0 : is_physically_contiguous - Described if the
+ * queue ring memory is allocated in physical
+ * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ u8 sq_caps_3;
+
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ u16 cq_idx;
+
+ /* submission queue depth in entries */
+ u16 sq_depth;
+
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ u32 reserved0_w7;
+
+ u32 reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
+
+struct ena_admin_acq_create_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ u16 sq_idx;
+
+ u16 reserved;
+
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 sq_doorbell_offset;
+
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ u32 llq_descriptors_offset;
+
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ u32 llq_headers_offset;
+};
+
+struct ena_admin_aq_destroy_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_sq sq;
+};
+
+struct ena_admin_acq_destroy_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_admin_aq_create_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ u8 cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ u8 cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ u16 cq_depth;
+
+ /* msix vector assigned to this cq */
+ u32 msix_vector;
+
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_admin_acq_create_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ u16 cq_idx;
+
+ /* actual cq depth in number of entries */
+ u16 cq_actual_depth;
+
+ u32 numa_node_register_offset;
+
+ u32 cq_head_db_register_offset;
+
+ u32 cq_interrupt_unmask_register_offset;
+};
+
+struct ena_admin_aq_destroy_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ u16 cq_idx;
+
+ u16 reserved1;
+};
+
+struct ena_admin_acq_destroy_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ /* command specific inline data */
+ u32 inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* stats type as defined in enum ena_admin_get_stats_type */
+ u8 type;
+
+ /* stats scope defined in enum ena_admin_get_stats_scope */
+ u8 scope;
+
+ u16 reserved3;
+
+ /* queue id. used when scope is specific_queue */
+ u16 queue_idx;
+
+ /* device id, value 0xFFFF means mine. only privileged device can get
+ * stats of other device
+ */
+ u16 device_id;
+};
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+ u32 tx_bytes_low;
+
+ u32 tx_bytes_high;
+
+ u32 tx_pkts_low;
+
+ u32 tx_pkts_high;
+
+ u32 rx_bytes_low;
+
+ u32 rx_bytes_high;
+
+ u32 rx_pkts_low;
+
+ u32 rx_pkts_high;
+
+ u32 rx_drops_low;
+
+ u32 rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ u8 flags;
+
+ /* as appears in ena_admin_aq_feature_id */
+ u8 feature_id;
+
+ u16 reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+ u32 impl_id;
+
+ u32 device_version;
+
+ /* bitmap of ena_admin_aq_feature_id */
+ u32 supported_features;
+
+ u32 reserved3;
+
+ /* Indicates how many bits are used for physical address access. */
+ u32 phys_addr_width;
+
+ /* Indicates how many bits are used for virtual address access. */
+ u32 virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ u8 mac_addr[6];
+
+ u8 reserved7[2];
+
+ u32 max_mtu;
+};
+
+struct ena_admin_queue_feature_desc {
+ /* including LLQs */
+ u32 max_sq_num;
+
+ u32 max_sq_depth;
+
+ u32 max_cq_num;
+
+ u32 max_cq_depth;
+
+ u32 max_llq_num;
+
+ u32 max_llq_depth;
+
+ u32 max_header_size;
+
+ /* Maximum Descriptors number, including meta descriptor, allowed for
+ * a single Tx packet
+ */
+ u16 max_packet_tx_descs;
+
+ /* Maximum Descriptors number allowed for a single Rx packet */
+ u16 max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+ /* exclude L2 */
+ u32 mtu;
+};
+
+struct ena_admin_set_feature_host_attr_desc {
+ /* host OS info base address in OS memory. host info is 4KB of
+ * physically contiguous
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* debug area size */
+ u32 debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+ /* interrupt delay granularity in usec */
+ u16 intr_delay_resolution;
+
+ u16 reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+ /* Link speed in Mb */
+ u32 speed;
+
+ /* bit field of enum ena_admin_link types */
+ u32 supported;
+
+ /* 0 : autoneg
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ u32 supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ u32 enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
+ */
+ u32 tx;
+
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ u32 rx_supported;
+
+ u32 rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ ENA_ADMIN_CRC32 = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+ u32 keys_num;
+
+ u32 reserved;
+
+ u32 key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+ u32 supported_func;
+
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
+ */
+ u32 selected_func;
+
+ /* initial value */
+ u32 init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = 0,
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = 1,
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = 2,
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = 5,
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = 6,
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = 7,
+};
+
+struct ena_admin_proto_input {
+ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+ u16 fields;
+
+ u16 reserved2;
+};
+
+struct ena_admin_feature_rss_hash_control {
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+ /* supported hash input sorting
+ * 1 : L3_sort - support swap L3 addresses if DA is
+ * smaller than SA
+ * 2 : L4_sort - support swap L4 ports if DP smaller
+ * SP
+ */
+ u16 supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swap L3 addresses if
+ * DA smaller than SA
+ * 2 : enable_L4_sort - enable swap L4 ports if DP
+ * smaller than SP
+ */
+ u16 enabled_input_sort;
+};
+
+enum ena_admin_os_type {
+ ENA_ADMIN_OS_LINUX = 1,
+
+ ENA_ADMIN_OS_WIN = 2,
+
+ ENA_ADMIN_OS_DPDK = 3,
+
+ ENA_ADMIN_OS_FREEBSD = 4,
+
+ ENA_ADMIN_OS_IPXE = 5,
+};
+
+struct ena_admin_host_info {
+ /* defined in enum ena_admin_os_type */
+ u32 os_type;
+
+ /* os distribution string format */
+ u8 os_dist_str[128];
+
+ /* OS distribution numeric format */
+ u32 os_dist;
+
+ /* kernel version string format */
+ u8 kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ u32 kernel_ver;
+
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
+ */
+ u32 driver_version;
+
+ /* features bitmap */
+ u32 supported_network_features[4];
+};
+
+struct ena_admin_rss_ind_table_entry {
+ u16 cq_idx;
+
+ u16 reserved;
+};
+
+struct ena_admin_feature_rss_ind_table {
+ /* min supported table size (2^min_size) */
+ u16 min_size;
+
+ /* max supported table size (2^max_size) */
+ u16 max_size;
+
+ /* table size (2^size) */
+ u16 size;
+
+ u16 reserved;
+
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
+ u32 inline_index;
+
+ /* used for updating single entry, ignored when setting the entire
+ * table through the control buffer.
+ */
+ struct ena_admin_rss_ind_table_entry inline_entry;
+};
+
+struct ena_admin_get_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ u32 raw[11];
+};
+
+struct ena_admin_get_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+
+ struct ena_admin_device_attr_feature_desc dev_attr;
+
+ struct ena_admin_queue_feature_desc max_queue;
+
+ struct ena_admin_feature_aenq_desc aenq;
+
+ struct ena_admin_get_feature_link_desc link;
+
+ struct ena_admin_feature_offload_desc offload;
+
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ struct ena_admin_feature_rss_ind_table ind_table;
+
+ struct ena_admin_feature_intr_moder_desc intr_moderation;
+ } u;
+};
+
+struct ena_admin_set_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ union {
+ u32 raw[11];
+
+ /* mtu size */
+ struct ena_admin_set_feature_mtu_desc mtu;
+
+ /* host attributes */
+ struct ena_admin_set_feature_host_attr_desc host_attr;
+
+ /* AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+ } u;
+};
+
+struct ena_admin_set_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+ } u;
+};
+
+struct ena_admin_aenq_common_desc {
+ u16 group;
+
+ u16 syndrom;
+
+ /* 0 : phase */
+ u8 flags;
+
+ u8 reserved1[3];
+
+ u32 timestamp_low;
+
+ u32 timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+ ENA_ADMIN_LINK_CHANGE = 0,
+
+ ENA_ADMIN_FATAL_ERROR = 1,
+
+ ENA_ADMIN_WARNING = 2,
+
+ ENA_ADMIN_NOTIFICATION = 3,
+
+ ENA_ADMIN_KEEP_ALIVE = 4,
+
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+enum ena_admin_aenq_notification_syndrom {
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
+};
+
+struct ena_admin_aenq_entry {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ u32 inline_data_w4[12];
+};
+
+struct ena_admin_aenq_link_change_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* 0 : link_status */
+ u32 flags;
+};
+
+struct ena_admin_ena_mmio_req_read_less_resp {
+ u16 req_id;
+
+ u16 reg_off;
+
+ /* value is valid when poll is cleared */
+ u32 reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#endif /* _ENA_ADMIN_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
new file mode 100644
index 000000000000..3066d9c99984
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -0,0 +1,2666 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ena_com.h"
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Admin command completion timeout, in microseconds */
+#define ADMIN_CMD_TIMEOUT_US (1000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
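+/* Minimal device spec version the driver supports; the major version is
+ * packed above the minor one so packed values compare like version numbers.
+ */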
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+ | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR 0
+#define ENA_CTRL_MINOR 0
+#define ENA_CTRL_SUB_MINOR 1
+
+#define MIN_ENA_CTRL_VER \
+ (((ENA_CTRL_MAJOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((ENA_CTRL_MINOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
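+/* Sentinel value returned by ena_com_reg_bar_read32() when a readless
+ * register read times out.
+ */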
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+ ENA_CMD_SUBMITTED,
+ ENA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+ struct completion wait_event;
+ struct ena_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum ena_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ bool occupied;
+};
+
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
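+/* Split a DMA address into the low/high dwords the device expects, after
+ * verifying the address fits the device's reported DMA width.
+ */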
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+ pr_err("dma address has more bits than the device supports\n");
+ return -EINVAL;
+ }
+
+ ena_addr->mem_addr_low = (u32)addr;
+ ena_addr->mem_addr_high = (u64)addr >> 32;
+
+ return 0;
+}
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+
+ sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+ GFP_KERNEL);
+
+ if (!sq->entries) {
+ pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
+
+ sq->db_addr = NULL;
+
+ return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+ GFP_KERNEL);
+
+ if (!cq->entries) {
+ pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ cq->head = 0;
+ cq->phase = 1;
+
+ return 0;
+}
+
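+/* Allocate the asynchronous event notification queue (AENQ) and program
+ * its base address and capabilities into the device registers.
+ */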
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+ struct ena_aenq_handlers *aenq_handlers)
+{
+ struct ena_com_aenq *aenq = &dev->aenq;
+ u32 addr_low, addr_high, aenq_caps;
+ u16 size;
+
+ dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
+
+ if (!aenq->entries) {
+ pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+ writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+
+ aenq_caps = 0;
+ aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
+ << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers)) {
+ pr_err("aenq handlers pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ aenq->aenq_handlers = aenq_handlers;
+
+ return 0;
+}
+
+static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ struct ena_comp_ctx *comp_ctx)
+{
+ comp_ctx->occupied = false;
+ atomic_dec(&queue->outstanding_cmds);
+}
+
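+/* Look up the completion context of a given command id. When @capture is
+ * true the context is also marked occupied and counted as an outstanding
+ * command.
+ */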
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+{
+ if (unlikely(command_id >= queue->q_depth)) {
+ pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+ return NULL;
+ }
+
+ if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ pr_err("Completion context is occupied\n");
+ return NULL;
+ }
+
+ if (capture) {
+ atomic_inc(&queue->outstanding_cmds);
+ queue->comp_ctx[command_id].occupied = true;
+ }
+
+ return &queue->comp_ctx[command_id];
+}
+
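+/* Submit a command to the admin submission queue: detect queue-full via
+ * the tail/head distance, fill in the phase bit and command id, copy the
+ * command into the ring and ring the doorbell. The SQ phase flips each
+ * time the tail wraps around the queue.
+ */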
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 tail_masked, cmd_id;
+ u16 queue_size_mask;
+ u16 cnt;
+
+ queue_size_mask = admin_queue->q_depth - 1;
+
+ tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+ /* In case of queue FULL */
+ cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ if (cnt >= admin_queue->q_depth) {
+ pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
+ admin_queue->sq.tail, admin_queue->sq.head,
+ admin_queue->q_depth);
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(-ENOSPC);
+ }
+
+ cmd_id = admin_queue->curr_cmd_id;
+
+ cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
+ ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+ cmd->aq_common_descriptor.command_id |= cmd_id &
+ ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(-EINVAL);
+
+ comp_ctx->status = ENA_CMD_SUBMITTED;
+ comp_ctx->comp_size = (u32)comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+ reinit_completion(&comp_ctx->wait_event);
+
+ memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
+
+ admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
+ queue_size_mask;
+
+ admin_queue->sq.tail++;
+ admin_queue->stats.submitted_cmd++;
+
+ if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
+ admin_queue->sq.phase = !admin_queue->sq.phase;
+
+ writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
+
+ return comp_ctx;
+}
+
+static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+{
+ size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(queue, i, false);
+ if (comp_ctx)
+ init_completion(&comp_ctx->wait_event);
+ }
+
+ return 0;
+}
+
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ unsigned long flags;
+ struct ena_comp_ctx *comp_ctx;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ if (unlikely(!admin_queue->running_state)) {
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ return ERR_PTR(-ENODEV);
+ }
+ comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
+ cmd_size_in_bytes,
+ comp,
+ comp_size_in_bytes);
+ if (unlikely(IS_ERR(comp_ctx)))
+ admin_queue->running_state = false;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ return comp_ctx;
+}
+
+static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_sq *io_sq)
+{
+ size_t size;
+ int dev_node = 0;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+
+ io_sq->desc_entry_size =
+ (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_desc) :
+ sizeof(struct ena_eth_io_rx_desc);
+
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
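+ /* Try to allocate the descriptor ring on the queue's NUMA node
+ * first; if that fails, retry with no node preference.
+ */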
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ dev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+ }
+ } else {
+ dev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ }
+ }
+
+ if (!io_sq->desc_addr.virt_addr) {
+ pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ io_sq->tail = 0;
+ io_sq->next_to_comp = 0;
+ io_sq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+ int prev_node = 0;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+
+ /* Use the basic completion descriptor for Rx */
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
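+ /* As for the SQ descriptors, prefer the queue's NUMA node and
+ * fall back to the default node on failure.
+ */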
+ prev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_cq->cdesc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ io_cq->cdesc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
+ }
+
+ if (!io_cq->cdesc_addr.virt_addr) {
+ pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ io_cq->phase = 1;
+ io_cq->head = 0;
+
+ return 0;
+}
+
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = cqe->acq_common_descriptor.command &
+ ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
+ if (unlikely(!comp_ctx)) {
+ pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ admin_queue->running_state = false;
+ return;
+ }
+
+ comp_ctx->status = ENA_CMD_COMPLETED;
+ comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+
+ if (!admin_queue->polling)
+ complete(&comp_ctx->wait_event);
+}
+
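+/* Reap the admin completion queue. An entry belongs to the driver when
+ * its phase bit matches the queue's current phase; the expected phase
+ * flips each time the queue wraps around.
+ */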
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+{
+ struct ena_admin_acq_entry *cqe = NULL;
+ u16 comp_num = 0;
+ u16 head_masked;
+ u8 phase;
+
+ head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
+ phase = admin_queue->cq.phase;
+
+ cqe = &admin_queue->cq.entries[head_masked];
+
+ /* Go over all the completions */
+ while ((cqe->acq_common_descriptor.flags &
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Do not read the rest of the completion entry until the
+ * phase bit has been validated.
+ */
+ rmb();
+ ena_com_handle_single_admin_completion(admin_queue, cqe);
+
+ head_masked++;
+ comp_num++;
+ if (unlikely(head_masked == admin_queue->q_depth)) {
+ head_masked = 0;
+ phase = !phase;
+ }
+
+ cqe = &admin_queue->cq.entries[head_masked];
+ }
+
+ admin_queue->cq.head += comp_num;
+ admin_queue->cq.phase = phase;
+ admin_queue->sq.head += comp_num;
+ admin_queue->stats.completed_cmd += comp_num;
+}
+
+static int ena_com_comp_status_to_errno(u8 comp_status)
+{
+ if (unlikely(comp_status != 0))
+ pr_err("admin command failed[%u]\n", comp_status);
+
+ if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
+ return -EINVAL;
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+ return 0;
+ case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return -ENOMEM;
+ case ENA_ADMIN_UNSUPPORTED_OPCODE:
+ return -EPERM;
+ case ENA_ADMIN_BAD_OPCODE:
+ case ENA_ADMIN_MALFORMED_REQUEST:
+ case ENA_ADMIN_ILLEGAL_PARAMETER:
+ case ENA_ADMIN_UNKNOWN_ERROR:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
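+/* Polling-mode wait: repeatedly reap the admin completion queue until
+ * the command completes, is aborted, or ADMIN_CMD_TIMEOUT_US elapses.
+ */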
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags;
+ u32 start_time;
+ int ret;
+
+ start_time = ((u32)jiffies_to_usecs(jiffies));
+
+ while (comp_ctx->status == ENA_CMD_SUBMITTED) {
+ if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
+ ADMIN_CMD_TIMEOUT_US) {
+ pr_err("Wait for completion (polling) timeout\n");
+ /* ENA didn't have any completion */
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.no_completion++;
+ admin_queue->running_state = false;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ ret = -ETIME;
+ goto err;
+ }
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ msleep(100);
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+ pr_err("Command was aborted\n");
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+ comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags;
+ int ret;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
+
+ /* In case the command wasn't completed, find out the root cause.
+ * There might be 2 kinds of errors:
+ * 1) No completion (timeout reached)
+ * 2) There is a completion but the driver didn't receive the
+ * MSI-X interrupt.
+ */
+ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ admin_queue->stats.no_completion++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ if (comp_ctx->status == ENA_CMD_COMPLETED)
+ pr_err("The ena device sent a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
+ else
+ pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
+
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
+ }
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+/* This method reads a hardware device register by posting writes
+ * and waiting for the response.
+ * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, ret;
+ unsigned long flags;
+ int i;
+
+ might_sleep();
+
+ /* If readless is disabled, perform regular read */
+ if (!mmio_read->readless_supported)
+ return readl(ena_dev->reg_bar + offset);
+
+ spin_lock_irqsave(&mmio_read->lock, flags);
+ mmio_read->seq_num++;
+
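+ /* Poison req_id so a stale response can't match the new sequence
+ * number before the device writes the real reply.
+ */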
+ read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+ mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+ /* make sure read_resp->req_id gets updated before the hw can
+ * write to it
+ */
+ wmb();
+
+ writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+
+ for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ if (read_resp->req_id == mmio_read->seq_num)
+ break;
+
+ udelay(1);
+ }
+
+ if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+ pr_err("Read failure: wrong offset provided\n");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+ }
+err:
+ spin_unlock_irqrestore(&mmio_read->lock, flags);
+
+ return ret;
+}
+
+/* There are two ways to wait for a completion.
+ * Polling mode - poll until the completion is available.
+ * Async mode - wait on a wait queue until the completion is ready
+ * (or the timeout expires).
+ * In async mode the IRQ handler is expected to call
+ * ena_com_handle_admin_completion() to mark the completions.
+ */
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (admin_queue->polling)
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
+
+ return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+ admin_queue);
+}
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+ u8 direction;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ destroy_cmd.sq.sq_identity |= (direction <<
+ ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+ destroy_cmd.sq.sq_idx = io_sq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+ pr_err("failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+ }
+
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
+ else
+ devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+ }
+}
+
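+/* Poll the device status register until the reset-in-progress bit
+ * reaches the expected state or the timeout expires.
+ */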
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ /* The resolution of the timeout is 100ms */
+ msleep(100);
+ }
+
+ return -ETIME;
+}
+
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+ /* Device attributes are always supported */
+ if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
+ !(ena_dev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_get_feat_cmd get_cmd;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+ pr_info("Feature %d isn't supported\n", feature_id);
+ return -EPERM;
+ }
+
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ get_cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ else
+ get_cmd.aq_common_descriptor.flags = 0;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd.control_buffer.address,
+ control_buf_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ get_cmd.control_buffer.length = control_buff_size;
+
+ get_cmd.feat_common.feature_id = feature_id;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct ena_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
+
+ return ret;
+}
+
+static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ return ena_com_get_feature_ex(ena_dev,
+ get_resp,
+ feature_id,
+ 0,
+ 0);
+}
+
+static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ rss->hash_key =
+ dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_key))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ rss->hash_key, rss->hash_key_dma_addr);
+ rss->hash_key = NULL;
+}
+
+static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ rss->hash_ctrl =
+ dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_ctrl))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ rss->hash_ctrl = NULL;
+}
+
+static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ u16 log_size)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ size_t tbl_size;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ if (unlikely(ret))
+ return ret;
+
+ if ((get_resp.u.ind_table.min_size > log_size) ||
+ (get_resp.u.ind_table.max_size < log_size)) {
+ pr_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
+ 1 << log_size, 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
+ return -EINVAL;
+ }
+
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rss->rss_ind_tbl =
+ dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
+ &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1ULL << log_size) * sizeof(u16);
+ rss->host_rss_ind_tbl =
+ devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+ rss->tbl_log_size = log_size;
+
+ return 0;
+
+mem_err2:
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr);
+ rss->rss_ind_tbl = NULL;
+mem_err1:
+ rss->tbl_log_size = 0;
+ return -ENOMEM;
+}
+
+static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ size_t tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ if (rss->rss_ind_tbl)
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr);
+ rss->rss_ind_tbl = NULL;
+
+ if (rss->host_rss_ind_tbl)
+ devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
+ rss->host_rss_ind_tbl = NULL;
+}
+
+static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq, u16 cq_idx)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_sq_cmd create_cmd;
+ struct ena_admin_acq_create_sq_resp_desc cmd_completion;
+ u8 direction;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ create_cmd.sq_identity |= (direction <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+
+ create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+
+ create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+
+ create_cmd.sq_caps_3 |=
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+
+ create_cmd.cq_idx = cq_idx;
+ create_cmd.sq_depth = io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ pr_err("Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_sq->idx = cmd_completion.sq_idx;
+
+ io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ (uintptr_t)cmd_completion.sq_doorbell_offset);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
+ + cmd_completion.llq_headers_offset);
+
+ io_sq->desc_addr.pbuf_dev_addr =
+ (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
+ cmd_completion.llq_descriptors_offset);
+ }
+
+ pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+}
+
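+/* Translate the host indirection table, which holds host queue ids, into
+ * the device table, which holds the corresponding device queue indices.
+ */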
+static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_com_io_sq *io_sq;
+ u16 qid;
+ int i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ qid = rss->host_rss_ind_tbl[i];
+ if (qid >= ENA_TOTAL_NUM_QUEUES)
+ return -EINVAL;
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+
+ if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
+ return -EINVAL;
+
+ rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
+ }
+
+ return 0;
+}
+
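+/* Inverse of the conversion above: rebuild the host indirection table by
+ * mapping each device queue index back to its host queue id.
+ */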
+static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+{
+ u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
+ struct ena_rss *rss = &ena_dev->rss;
+ u8 idx;
+ u16 i;
+
+ for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+ dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
+ return -EINVAL;
+ idx = (u8)rss->rss_ind_tbl[i].cq_idx;
+
+ if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
+ return -EINVAL;
+
+ rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+ }
+
+ return 0;
+}
+
+static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ size_t size;
+
+ size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
+
+ ena_dev->intr_moder_tbl =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ if (!ena_dev->intr_moder_tbl)
+ return -ENOMEM;
+
+ ena_com_config_default_interrupt_moderation_table(ena_dev);
+
+ return 0;
+}
+
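+/* Scale the interrupt moderation intervals from microseconds to units of
+ * the device's delay resolution.
+ */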
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int i;
+
+ if (!intr_delay_resolution) {
+ pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ intr_delay_resolution = 1;
+ }
+ ena_dev->intr_delay_resolution = intr_delay_resolution;
+
+ /* update Rx */
+ for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
+ intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+
+ /* update Tx */
+ ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+}
+
+/*****************************************************************************/
+/******************************* API ******************************/
+/*****************************************************************************/
+
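+/* Execute an admin command: submit it to the admin queue and wait for its
+ * completion, either by polling or via interrupt, depending on the
+ * queue's configured mode.
+ */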
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct ena_comp_ctx *comp_ctx;
+ int ret;
+
+ comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
+ comp, comp_size);
+ if (unlikely(IS_ERR(comp_ctx))) {
+ pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
+ return PTR_ERR(comp_ctx);
+ }
+
+ ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
+ if (unlikely(ret)) {
+ if (admin_queue->running_state)
+ pr_err("Failed to process command. ret = %d\n", ret);
+ else
+ pr_debug("Failed to process command. ret = %d\n", ret);
+ }
+ return ret;
+}
+
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_cq_cmd create_cmd;
+ struct ena_admin_acq_create_cq_resp_desc cmd_completion;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
+
+ create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.cq_caps_1 |=
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+
+ create_cmd.msix_vector = io_cq->msix_vector;
+ create_cmd.cq_depth = io_cq->q_depth;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.cq_ba,
+ io_cq->cdesc_addr.phys_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ pr_err("Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_cq->idx = cmd_completion.cq_idx;
+
+ io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_interrupt_unmask_register_offset);
+
+ if (cmd_completion.cq_head_db_register_offset)
+ io_cq->cq_head_db_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_head_db_register_offset);
+
+ if (cmd_completion.numa_node_register_offset)
+ io_cq->numa_node_cfg_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+ pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+}
+
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq)
+{
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ pr_err("Invalid queue number %d, max is %d\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+
+ *io_sq = &ena_dev->io_sq_queues[qid];
+ *io_cq = &ena_dev->io_cq_queues[qid];
+
+ return 0;
+}
+
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ if (!admin_queue->comp_ctx)
+ return;
+
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
+ if (unlikely(!comp_ctx))
+ break;
+
+ comp_ctx->status = ENA_CMD_ABORTED;
+
+ complete(&comp_ctx->wait_event);
+ }
+}
+
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ }
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+}
+
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ destroy_cmd.cq_idx = io_cq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+ pr_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+}
+
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->admin_queue.running_state;
+}
+
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_dev->admin_queue.running_state = state;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+}
+
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
+{
+ u16 depth = ena_dev->aenq.q_depth;
+
+ WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+
+ /* Init head_db to mark that all entries in the queue
+ * are initially available
+ */
+ writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+ if (ret) {
+ pr_info("Can't get aenq configuration\n");
+ return ret;
+ }
+
+ if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+ pr_warn("Trying to set unsupported aenq events. supported flags: 0x%x requested flags: 0x%x\n",
+ get_resp.u.aenq.supported_groups, groups_flag);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
+ cmd.u.aenq.enabled_groups = groups_flag;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+{
+ u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+ int width;
+
+ if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+ ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+ pr_debug("ENA dma width: %d\n", width);
+
+ if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
+ pr_err("DMA width illegal value: %d\n", width);
+ return -EINVAL;
+ }
+
+ ena_dev->dma_addr_bits = width;
+
+ return width;
+}
+
+int ena_com_validate_version(struct ena_com_dev *ena_dev)
+{
+ u32 ver;
+ u32 ctrl_ver;
+ u32 ctrl_ver_masked;
+
+ /* Make sure the ENA version and the controller version are at least
+ * as new as the driver expects
+ */
+ ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+ (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ pr_info("ena device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+ if (ver < MIN_ENA_VER) {
+ pr_err("ENA version is lower than the minimal version the driver supports\n");
+ return -1;
+ }
+
+ pr_info("ena controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+ ctrl_ver_masked =
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
+ pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
+
+ if (admin_queue->comp_ctx)
+ devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
+ admin_queue->comp_ctx = NULL;
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ if (ena_dev->aenq.entries)
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr);
+ aenq->entries = NULL;
+}
+
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
+{
+ ena_dev->admin_queue.polling = polling;
+}
+
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ spin_lock_init(&mmio_read->lock);
+ mmio_read->read_resp =
+ dma_zalloc_coherent(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (unlikely(!mmio_read->read_resp))
+ return -ENOMEM;
+
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ mmio_read->read_resp->req_id = 0x0;
+ mmio_read->seq_num = 0x0;
+ mmio_read->readless_supported = true;
+
+ return 0;
+}
+
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ mmio_read->readless_supported = readless_supported;
+}
+
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+
+ mmio_read->read_resp = NULL;
+}
+
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ u32 addr_low, addr_high;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
+
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+}
+
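+/* Bring up the admin queue: allocate the SQ/CQ and completion contexts,
+ * program their base addresses and capabilities into the device, and
+ * initialize the AENQ with the supplied handlers.
+ */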
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
+ int ret;
+
+ dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+ pr_err("Device isn't ready, abort com init\n");
+ return -ENODEV;
+ }
+
+ admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
+
+ admin_queue->q_dmadev = ena_dev->dmadev;
+ admin_queue->polling = false;
+ admin_queue->curr_cmd_id = 0;
+
+ atomic_set(&admin_queue->outstanding_cmds, 0);
+
+ if (init_spinlock)
+ spin_lock_init(&admin_queue->q_lock);
+
+ ret = ena_com_init_comp_ctxt(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_sq(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_cq(admin_queue);
+ if (ret)
+ goto error;
+
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
+
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
+
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
+
+ aq_caps = 0;
+ aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+ aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+ acq_caps = 0;
+ acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+ acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+
+ writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
+ ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
+ if (ret)
+ goto error;
+
+ admin_queue->running_state = true;
+
+ return 0;
+error:
+ ena_com_admin_destroy(ena_dev);
+
+ return ret;
+}
+
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+ int ret;
+
+ if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+ pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[ctx->qid];
+ io_cq = &ena_dev->io_cq_queues[ctx->qid];
+
+ memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
+ memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+
+ /* Init CQ */
+ io_cq->q_depth = ctx->queue_size;
+ io_cq->direction = ctx->direction;
+ io_cq->qid = ctx->qid;
+
+ io_cq->msix_vector = ctx->msix_vector;
+
+ io_sq->q_depth = ctx->queue_size;
+ io_sq->direction = ctx->direction;
+ io_sq->qid = ctx->qid;
+
+ io_sq->mem_queue_type = ctx->mem_queue_type;
+
+ if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+ io_sq->tx_max_header_size =
+ min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ if (ret)
+ goto error;
+ ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_cq(ena_dev, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
+ if (ret)
+ goto destroy_io_cq;
+
+ return 0;
+
+destroy_io_cq:
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+error:
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+ return ret;
+}
+
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+ io_cq = &ena_dev->io_cq_queues[qid];
+
+ ena_com_destroy_io_sq(ena_dev, io_sq);
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+}
+
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp)
+{
+ return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+}
+
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_DEVICE_ATTRIBUTES);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
+ sizeof(get_resp.u.dev_attr));
+ ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_NUM);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+ sizeof(get_resp.u.max_queue));
+ ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_AENQ_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
+ sizeof(get_resp.u.aenq));
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
+ sizeof(get_resp.u.offload));
+
+ return 0;
+}
+
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
+{
+ ena_com_handle_admin_completion(&ena_dev->admin_queue);
+}
+
+/* ena_com_get_specific_aenq_cb:
+ * return the handler that is relevant to the specific event group
+ */
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+ u16 group)
+{
+ struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+
+ if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/* ena_com_aenq_intr_handler:
+ * handles the incoming aenq events.
+ * Pops events from the queue and applies the matching handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+{
+ struct ena_admin_aenq_entry *aenq_e;
+ struct ena_admin_aenq_common_desc *aenq_common;
+ struct ena_com_aenq *aenq = &dev->aenq;
+ ena_aenq_handler handler_cb;
+ u16 masked_head, processed = 0;
+ u8 phase;
+
+ masked_head = aenq->head & (aenq->q_depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[masked_head]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
+ phase) {
+ pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrom,
+ (u64)aenq_common->timestamp_low +
+ ((u64)aenq_common->timestamp_high << 32));
+
+ /* Handle the specific event */
+ handler_cb = ena_com_get_specific_aenq_cb(dev,
+ aenq_common->group);
+ handler_cb(data, aenq_e); /* call the actual event handler */
+
+ /* Get next event entry */
+ masked_head++;
+ processed++;
+
+ if (unlikely(masked_head == aenq->q_depth)) {
+ masked_head = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[masked_head];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->head += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+ writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
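+/* Reset flow: assert the reset bit in DEV_CTL, wait for the device to
+ * report reset-in-progress, then deassert and wait for the indication to
+ * clear. The timeout is taken from the device's CAPS register.
+ */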
+int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+{
+ u32 stat, timeout, cap, reset_val;
+ int rc;
+
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+ (cap == ENA_MMIO_READ_TIMEOUT))) {
+ pr_err("Reg read32 timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+ pr_err("Device isn't ready, can't reset device\n");
+ return -EINVAL;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+ ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ if (timeout == 0) {
+ pr_err("Invalid timeout value\n");
+ return -EINVAL;
+ }
+
+ /* start reset */
+ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+
+ /* Write again the MMIO read request address */
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+ pr_err("Reset indication didn't turn on\n");
+ return rc;
+ }
+
+ /* reset done */
+ writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+ pr_err("Reset indication didn't turn off\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
+
+ return ret;
+}
+
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+ pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_MTU;
+ cmd.u.mtu.mtu = mtu;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+
+ return ret;
+}
+
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload)
+{
+ int ret;
+ struct ena_admin_get_feat_resp resp;
+
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (unlikely(ret)) {
+ pr_err("Failed to get offload capabilities %d\n", ret);
+ return ret;
+ }
+
+ memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
+
+ return 0;
+}
+
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ pr_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return -EPERM;
+ }
+
+ /* Validate hash function is supported */
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ if (unlikely(ret))
+ return ret;
+
+	if (!(get_resp.u.flow_hash_func.supported_func &
+	      (1 << rss->hash_func))) {
+ pr_err("Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
+ cmd.u.flow_hash_func.init_val = rss->hash_init_val;
+ cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_key_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = sizeof(*rss->hash_key);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+ pr_err("Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+	/* Make sure the key size is a multiple of DWORDs */
+ if (unlikely(key_len & 0x3))
+ return -EINVAL;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ pr_err("Flow hash function %d isn't supported\n", func);
+ return -EPERM;
+ }
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+ if (key_len > sizeof(hash_key->key)) {
+ pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
+ }
+
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+ break;
+ default:
+ pr_err("Invalid hash function (%d)\n", func);
+ return -EINVAL;
+ }
+
+ rc = ena_com_set_hash_function(ena_dev);
+
+ /* Restore the old function */
+ if (unlikely(rc))
+ ena_com_get_hash_function(ena_dev, NULL, NULL);
+
+ return rc;
+}
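+
+/* Illustrative usage sketch (the key contents are hypothetical): a caller
+ * configuring a 40-byte (10 DW) Toeplitz key with a zero initial value:
+ *
+ *	static const u8 rss_key[40] = { ... };
+ *
+ *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
+ *					rss_key, sizeof(rss_key), 0);
+ */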
+
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ if (func)
+ *func = rss->hash_func;
+
+ if (key)
+ memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+
+ return 0;
+}
+
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_INPUT,
+ rss->hash_ctrl_dma_addr,
+ sizeof(*rss->hash_ctrl));
+ if (unlikely(rc))
+ return rc;
+
+ if (fields)
+ *fields = rss->hash_ctrl->selected_fields[proto].fields;
+
+ return 0;
+}
+
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_INPUT)) {
+ pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
+ cmd.u.flow_hash_input.enabled_input_sort =
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_ctrl_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret))
+ pr_err("Failed to set hash input. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl =
+ rss->hash_ctrl;
+ u16 available_fields = 0;
+ int rc, i;
+
+ /* Get the supported hash input */
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
+		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
+
+ for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
+ available_fields = hash_ctrl->selected_fields[i].fields &
+ hash_ctrl->supported_fields[i].fields;
+ if (available_fields != hash_ctrl->selected_fields[i].fields) {
+			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
+ return -EPERM;
+ }
+ }
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+ return rc;
+}
+
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ u16 supported_fields;
+ int rc;
+
+ if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+ pr_err("Invalid proto num (%u)\n", proto);
+ return -EINVAL;
+ }
+
+ /* Get the ctrl table */
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ /* Make sure all the fields are supported */
+ supported_fields = hash_ctrl->supported_fields[proto].fields;
+ if ((hash_fields & supported_fields) != hash_fields) {
+ pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ proto, hash_fields, supported_fields);
+ }
+
+ hash_ctrl->selected_fields[proto].fields = hash_fields;
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+	return rc;
+}
+
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
+ return -EINVAL;
+
+	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
+ return -EINVAL;
+
+ rss->host_rss_ind_tbl[entry_idx] = entry_value;
+
+ return 0;
+}
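+
+/* Illustrative sketch: a driver usually fills the whole host-side table
+ * before flushing it with ena_com_indirect_table_set(); table_size and
+ * num_queues below are hypothetical:
+ *
+ *	for (i = 0; i < table_size; i++)
+ *		ena_com_indirect_table_fill_entry(ena_dev, i,
+ *						  i % num_queues);
+ *
+ *	rc = ena_com_indirect_table_set(ena_dev);
+ */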
+
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(
+ ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ pr_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return -EPERM;
+ }
+
+ ret = ena_com_ind_tbl_convert_to_device(ena_dev);
+ if (ret) {
+ pr_err("Failed to convert host indirection table to device table\n");
+ return ret;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.u.ind_table.size = rss->tbl_log_size;
+ cmd.u.ind_table.inline_index = 0xFFFFFFFF;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->rss_ind_tbl_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set indirect table. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ u32 tbl_size;
+ int i, rc;
+
+ tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ rss->rss_ind_tbl_dma_addr,
+ tbl_size);
+ if (unlikely(rc))
+ return rc;
+
+ if (!ind_tbl)
+ return 0;
+
+ rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+ if (unlikely(rc))
+ return rc;
+
+ for (i = 0; i < (1 << rss->tbl_log_size); i++)
+ ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+ return 0;
+}
+
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+{
+ int rc;
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+ rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
+ if (unlikely(rc))
+ goto err_indr_tbl;
+
+ rc = ena_com_hash_key_allocate(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_key;
+
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+
+ return 0;
+
+err_hash_ctrl:
+ ena_com_hash_key_destroy(ena_dev);
+err_hash_key:
+ ena_com_indirect_table_destroy(ena_dev);
+err_indr_tbl:
+
+ return rc;
+}
+
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
+{
+ ena_com_indirect_table_destroy(ena_dev);
+ ena_com_hash_key_destroy(ena_dev);
+ ena_com_hash_ctrl_destroy(ena_dev);
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+}
+
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ host_attr->host_info =
+ dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
+ if (unlikely(!host_attr->host_info))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ host_attr->debug_area_virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
+ &host_attr->debug_area_dma_addr, GFP_KERNEL);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return -ENOMEM;
+ }
+
+ host_attr->debug_area_size = debug_area_size;
+
+ return 0;
+}
+
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->host_info) {
+ dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
+ host_attr->host_info_dma_addr);
+ host_attr->host_info = NULL;
+ }
+}
+
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->debug_area_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+}
+
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+	int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_HOST_ATTR_CONFIG)) {
+ pr_warn("Set host attribute isn't supported\n");
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.debug_ba,
+ host_attr->debug_area_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.os_info_ba,
+ host_attr->host_info_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set host attributes: %d\n", ret);
+
+ return ret;
+}
+
+/* Interrupt moderation */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+}
+
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ pr_err("Illegal interrupt delay granularity value\n");
+ return -EFAULT;
+ }
+
+ ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
+ ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ pr_err("Illegal interrupt delay granularity value\n");
+ return -EFAULT;
+ }
+
+	/* We use the LOWEST entry of the moderation table for storing the
+	 * nonadaptive interrupt coalescing values
+	 */
+ ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ rx_coalesce_usecs / ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ if (ena_dev->intr_moder_tbl)
+ devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
+ ena_dev->intr_moder_tbl = NULL;
+}
+
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ u16 delay_resolution;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+
+ if (rc) {
+ if (rc == -EPERM) {
+ pr_info("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+ ena_com_disable_adaptive_moderation(ena_dev);
+ return rc;
+ }
+
+ rc = ena_com_init_interrupt_moderation_table(ena_dev);
+ if (rc)
+ goto err;
+
+ /* if moderation is supported by device we set adaptive moderation */
+ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
+ ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
+ ena_com_enable_adaptive_moderation(ena_dev);
+
+ return 0;
+err:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ return rc;
+}
+
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (!intr_moder_tbl)
+ return;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ ENA_INTR_LOWEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
+ ENA_INTR_LOWEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
+ ENA_INTR_LOWEST_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
+ ENA_INTR_LOW_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
+ ENA_INTR_LOW_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
+ ENA_INTR_LOW_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
+ ENA_INTR_MID_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
+ ENA_INTR_MID_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
+ ENA_INTR_MID_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
+ ENA_INTR_HIGH_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
+ ENA_INTR_HIGH_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
+ ENA_INTR_HIGH_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
+ ENA_INTR_HIGHEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
+ ENA_INTR_HIGHEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
+ ENA_INTR_HIGHEST_BYTES;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->intr_moder_tx_interval;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (intr_moder_tbl)
+ return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
+
+ return 0;
+}
+
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ intr_moder_tbl[level].intr_moder_interval /=
+ ena_dev->intr_delay_resolution;
+ intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+}
+
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
+ entry->pkts_per_interval =
+ intr_moder_tbl[level].pkts_per_interval;
+ entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
new file mode 100644
index 000000000000..509d7b8e15ab
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -0,0 +1,1038 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_COM
+#define ENA_COM
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "ena_common_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_regs_defs.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define ENA_MAX_NUM_IO_QUEUES 128U
+/* We need two queues for each IO queue pair (one for Tx and one for Rx) */
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+
+#define ENA_MAX_HANDLERS 256
+
+#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
+
+/* Unit in usec */
+#define ENA_REG_READ_TIMEOUT 200000
+
+#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry))
+#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
+#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENA adaptive interrupt moderation settings */
+
+#define ENA_INTR_LOWEST_USECS (0)
+#define ENA_INTR_LOWEST_PKTS (3)
+#define ENA_INTR_LOWEST_BYTES (2 * 1524)
+
+#define ENA_INTR_LOW_USECS (32)
+#define ENA_INTR_LOW_PKTS (12)
+#define ENA_INTR_LOW_BYTES (16 * 1024)
+
+#define ENA_INTR_MID_USECS (80)
+#define ENA_INTR_MID_PKTS (48)
+#define ENA_INTR_MID_BYTES (64 * 1024)
+
+#define ENA_INTR_HIGH_USECS (128)
+#define ENA_INTR_HIGH_PKTS (96)
+#define ENA_INTR_HIGH_BYTES (128 * 1024)
+
+#define ENA_INTR_HIGHEST_USECS (192)
+#define ENA_INTR_HIGHEST_PKTS (128)
+#define ENA_INTR_HIGHEST_BYTES (192 * 1024)
+
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
+#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
+#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+#define ENA_INTR_MODER_LEVEL_STRIDE 2
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+enum ena_intr_moder_level {
+ ENA_INTR_MODER_LOWEST = 0,
+ ENA_INTR_MODER_LOW,
+ ENA_INTR_MODER_MID,
+ ENA_INTR_MODER_HIGH,
+ ENA_INTR_MODER_HIGHEST,
+ ENA_INTR_MAX_NUM_OF_LEVELS,
+};
+
+struct ena_intr_moder_entry {
+ unsigned int intr_moder_interval;
+ unsigned int pkts_per_interval;
+ unsigned int bytes_per_interval;
+};
+
+enum queue_direction {
+ ENA_COM_IO_QUEUE_DIRECTION_TX,
+ ENA_COM_IO_QUEUE_DIRECTION_RX
+};
+
+struct ena_com_buf {
+ dma_addr_t paddr; /**< Buffer physical address */
+ u16 len; /**< Buffer length in bytes */
+};
+
+struct ena_com_rx_buf_info {
+ u16 len;
+ u16 req_id;
+};
+
+struct ena_com_io_desc_addr {
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
+ dma_addr_t phys_addr;
+};
+
+struct ena_com_tx_meta {
+ u16 mss;
+ u16 l3_hdr_len;
+ u16 l3_hdr_offset;
+ u16 l4_hdr_len; /* In words */
+};
+
+struct ena_com_io_cq {
+ struct ena_com_io_desc_addr cdesc_addr;
+
+ /* Interrupt unmask register */
+ u32 __iomem *unmask_reg;
+
+ /* The completion queue head doorbell register */
+ u32 __iomem *cq_head_db_reg;
+
+ /* numa configuration register (for TPH) */
+ u32 __iomem *numa_node_cfg_reg;
+
+ /* The value to write to the above register to unmask
+ * the interrupt of this queue
+ */
+ u32 msix_vector;
+
+ enum queue_direction direction;
+
+ /* holds the number of cdesc of the current packet */
+ u16 cur_rx_pkt_cdesc_count;
+	/* save the first cdesc idx of the current packet */
+ u16 cur_rx_pkt_cdesc_start_idx;
+
+ u16 q_depth;
+ /* Caller qid */
+ u16 qid;
+
+ /* Device queue index */
+ u16 idx;
+ u16 head;
+ u16 last_head_update;
+ u8 phase;
+ u8 cdesc_entry_size_in_bytes;
+} ____cacheline_aligned;
+
+struct ena_com_io_sq {
+ struct ena_com_io_desc_addr desc_addr;
+
+ u32 __iomem *db_addr;
+ u8 __iomem *header_addr;
+
+ enum queue_direction direction;
+ enum ena_admin_placement_policy_type mem_queue_type;
+
+ u32 msix_vector;
+ struct ena_com_tx_meta cached_tx_meta;
+
+ u16 q_depth;
+ u16 qid;
+
+ u16 idx;
+ u16 tail;
+ u16 next_to_comp;
+ u32 tx_max_header_size;
+ u8 phase;
+ u8 desc_entry_size;
+ u8 dma_addr_bits;
+} ____cacheline_aligned;
+
+struct ena_com_admin_cq {
+ struct ena_admin_acq_entry *entries;
+ dma_addr_t dma_addr;
+
+ u16 head;
+ u8 phase;
+};
+
+struct ena_com_admin_sq {
+ struct ena_admin_aq_entry *entries;
+ dma_addr_t dma_addr;
+
+ u32 __iomem *db_addr;
+
+ u16 head;
+ u16 tail;
+ u8 phase;
+};
+
+struct ena_com_stats_admin {
+ u32 aborted_cmd;
+ u32 submitted_cmd;
+ u32 completed_cmd;
+ u32 out_of_space;
+ u32 no_completion;
+};
+
+struct ena_com_admin_queue {
+ void *q_dmadev;
+ spinlock_t q_lock; /* spinlock for the admin queue */
+ struct ena_comp_ctx *comp_ctx;
+ u16 q_depth;
+ struct ena_com_admin_cq cq;
+ struct ena_com_admin_sq sq;
+
+ /* Indicate if the admin queue should poll for completion */
+ bool polling;
+
+ u16 curr_cmd_id;
+
+	/* Indicate that the device was initialized and can
+	 * process new admin commands
+	 */
+ bool running_state;
+
+ /* Count the number of outstanding admin commands */
+ atomic_t outstanding_cmds;
+
+ struct ena_com_stats_admin stats;
+};
+
+struct ena_aenq_handlers;
+
+struct ena_com_aenq {
+ u16 head;
+ u8 phase;
+ struct ena_admin_aenq_entry *entries;
+ dma_addr_t dma_addr;
+ u16 q_depth;
+ struct ena_aenq_handlers *aenq_handlers;
+};
+
+struct ena_com_mmio_read {
+ struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ u16 seq_num;
+ bool readless_supported;
+ /* spin lock to ensure a single outstanding read */
+ spinlock_t lock;
+};
+
+struct ena_rss {
+ /* Indirect table */
+ u16 *host_rss_ind_tbl;
+ struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
+ dma_addr_t rss_ind_tbl_dma_addr;
+ u16 tbl_log_size;
+
+ /* Hash key */
+ enum ena_admin_hash_functions hash_func;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ dma_addr_t hash_key_dma_addr;
+ u32 hash_init_val;
+
+	/* Hash control */
+ struct ena_admin_feature_rss_hash_control *hash_ctrl;
+ dma_addr_t hash_ctrl_dma_addr;
+};
+
+struct ena_host_attribute {
+ /* Debug area */
+ u8 *debug_area_virt_addr;
+ dma_addr_t debug_area_dma_addr;
+ u32 debug_area_size;
+
+ /* Host information */
+ struct ena_admin_host_info *host_info;
+ dma_addr_t host_info_dma_addr;
+};
+
+/* Each ena_dev is a PCI function. */
+struct ena_com_dev {
+ struct ena_com_admin_queue admin_queue;
+ struct ena_com_aenq aenq;
+ struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
+ struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
+ u8 __iomem *reg_bar;
+ void __iomem *mem_bar;
+ void *dmadev;
+
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ u32 tx_max_header_size;
+ u16 stats_func; /* Selected function for extended statistic dump */
+ u16 stats_queue; /* Selected queue for extended statistic dump */
+
+ struct ena_com_mmio_read mmio_read;
+
+ struct ena_rss rss;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct ena_host_attribute host_attr;
+ bool adaptive_coalescing;
+ u16 intr_delay_resolution;
+ u32 intr_moder_tx_interval;
+ struct ena_intr_moder_entry *intr_moder_tbl;
+};
+
+struct ena_com_dev_get_features_ctx {
+ struct ena_admin_queue_feature_desc max_queues;
+ struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_aenq_desc aenq;
+ struct ena_admin_feature_offload_desc offload;
+};
+
+struct ena_com_create_io_ctx {
+ enum ena_admin_placement_policy_type mem_queue_type;
+ enum queue_direction direction;
+ int numa_node;
+ u32 msix_vector;
+ u16 queue_size;
+ u16 qid;
+};
+
+typedef void (*ena_aenq_handler)(void *data,
+ struct ena_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct ena_aenq_handlers {
+ ena_aenq_handler handlers[ENA_MAX_HANDLERS];
+ ena_aenq_handler unimplemented_handler;
+};
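+
+/* Illustrative sketch of a client-side handlers table (the callback names
+ * are hypothetical; the group index assumes ENA_ADMIN_LINK_CHANGE from
+ * ena_admin_defs.h):
+ *
+ *	static struct ena_aenq_handlers my_aenq_handlers = {
+ *		.handlers = {
+ *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
+ *		},
+ *		.unimplemented_handler = my_unimplemented_cb,
+ *	};
+ */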
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the register read mechanism.
+ *
+ * @note: This method must be the first stage in the initialization sequence.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ * @readless_supported: readless mode (enable/disable)
+ */
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
+ bool readless_supported);
+
+/* ena_com_mmio_reg_read_request_write_dev_addr - Write the physical address
+ * of the mmio reg read response buffer to the device.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
+
+/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_init - Init the admin and the async queues
+ * @ena_dev: ENA communication layer struct
+ * @aenq_handlers: The handlers to be called upon AENQ events.
+ * @init_spinlock: Indicate if this method should init the admin spinlock or
+ * whether the spinlock was initialized before (for example, in case of FLR).
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock);
+
+/* ena_com_admin_destroy - Destroy the admin and the async events queues.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @note: Before calling this method, the caller must validate that the device
+ * won't send any additional admin completions/aenq.
+ * To achieve that, a FLR is recommended.
+ */
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_dev_reset - Perform an FLR on the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_queue - Create io queue.
+ * @ena_dev: ENA communication layer struct
+ * @ctx - create context structure
+ *
+ * Create the submission and the completion queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx);
+
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ */
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
+
+/* ena_com_get_io_handlers - Return the io queue handlers
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @io_sq - IO submission queue handler
+ * @io_cq - IO completion queue handler.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq);
+
+/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
+ * @ena_dev: ENA communication layer struct
+ *
+ * After this method, aenq events can be received via AENQ.
+ */
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_running_state - Set the state of the admin queue
+ * @ena_dev: ENA communication layer struct
+ *
+ * Change the state of the admin queue (enable/disable)
+ */
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
+
+/* ena_com_get_admin_running_state - Get the admin queue state
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the state of the admin queue (enable/disable)
+ *
+ * @return - current running state (enable/disable)
+ */
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
+
+/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ *
+ * Get the admin completion mode.
+ * If polling mode is on, ena_com_execute_admin_command will poll the admin
+ * completion queue for command completions, otherwise it will wait on a
+ * wait event.
+ *
+ * @return state
+ */
+bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the admin completion queue and wakes up all the
+ * pending threads that wait on the commands wait event.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
+
+/* ena_com_aenq_intr_handler - AENQ interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the async event notification queue and calls the
+ * proper aenq handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+
+/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call ena_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
+
+/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method waits until all the outstanding admin commands have completed.
+ */
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
+
+/* ena_com_validate_version - Validate the device parameters
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method validates that the device parameters are the same as the
+ * parameters saved in ena_dev.
+ * This method is useful after device reset, to validate that the device
+ * mac address and the device offloads are the same as before the reset.
+ *
+ * @return - 0 on success, negative value otherwise.
+ */
+int ena_com_validate_version(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_link_params - Retrieve physical link parameters.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Link parameters
+ *
+ * Retrieve the physical link parameters,
+ * like speed, auto-negotiation and full duplex support.
+ *
+ * @return - 0 on Success, negative value otherwise.
+ */
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp);
+
+/* ena_com_get_dma_width - Retrieve physical dma address width the device
+ * supports.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_aenq_config - Set aenq groups configurations
+ * @ena_dev: ENA communication layer struct
+ * @groups flag: bit fields flags of enum ena_admin_aenq_group.
+ *
+ * Configure which aenq event group the driver would like to receive.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
+
+/* ena_com_get_dev_attr_feat - Get device features
+ * @ena_dev: ENA communication layer struct
+ * @get_feat_ctx: returned context that contains the get features.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
+
+/* ena_com_get_dev_basic_stats - Get device basic statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats);
+
+/* ena_com_set_dev_mtu - Configure the device mtu.
+ * @ena_dev: ENA communication layer struct
+ * @mtu: mtu value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+
+/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+ * @ena_dev: ENA communication layer struct
+ * @offload: offload return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
+
+/* ena_com_rss_init - Init RSS
+ * @ena_dev: ENA communication layer struct
+ * @log_size: indirection log size
+ *
+ * Allocate RSS/RFS resources.
+ * The caller then can configure rss using ena_com_set_hash_function,
+ * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
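+
+/* Illustrative bring-up sketch (the log size is hypothetical, error handling
+ * omitted): after ena_com_rss_init() the caller fills the indirection table
+ * entries and the hash settings, then flushes each to the device:
+ *
+ *	rc = ena_com_rss_init(ena_dev, 7);
+ *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32,
+ *					NULL, 0, 0);
+ *	rc = ena_com_set_default_hash_ctrl(ena_dev);
+ *	rc = ena_com_indirect_table_set(ena_dev);
+ */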
+
+/* ena_com_rss_destroy - Destroy rss
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free all the RSS/RFS resources.
+ */
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+ * @key: Hash key (for Toeplitz hash)
+ * @key_len: key length (max length 10 DW)
+ * @init_val: initial value for the hash function
+ *
+ * Fill the ena_dev resources with the desired hash function, hash key, key_len
+ * and key initial value (if needed by the hash function).
+ * To flush the key into the device the caller should call
+ * ena_com_set_hash_function.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val);
+
+/* ena_com_set_hash_function - Flush the hash function and its dependencies to
+ * the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash function and its dependencies (key, key length and
+ * initial value) if needed.
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_function
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_function - Retrieve the hash function and the hash key
+ * from the device.
+ * @ena_dev: ENA communication layer struct
+ * @func: hash function
+ * @key: hash key
+ *
+ * Retrieve the hash function and the hash key from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key);
+
+/* ena_com_fill_hash_ctrl - Fill RSS hash control
+ * @ena_dev: ENA communication layer struct.
+ * @proto: The protocol to configure.
+ * @hash_fields: bit mask of ena_admin_flow_hash_fields
+ *
+ * Fill the ena_dev resources with the desired hash control (the ethernet
+ * fields that take part in the hash) for a specific protocol.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields);
+
+/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash control (the ethernet fields that take part in the hash)
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
+ * @ena_dev: ENA communication layer struct
+ * @proto: The protocol to retrieve.
+ * @fields: bit mask of ena_admin_flow_hash_fields.
+ *
+ * Retrieve the hash control from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields);
+
+/* ena_com_set_default_hash_ctrl - Set the hash control to a default
+ * configuration.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Fill the ena_dev resources with the default hash control configuration.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
+ * indirection table
+ * @ena_dev: ENA communication layer struct.
+ * @entry_idx - indirection table entry.
+ * @entry_value - redirection value
+ *
+ * Fill a single entry of the RSS indirection table in the ena_dev resources.
+ * To flush the indirection table to the device, the caller should call
+ * ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value);
+
+/* ena_com_indirect_table_set - Flush the indirection table to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the indirection table to the device.
+ * Prior to this method the caller should call ena_com_indirect_table_fill_entry
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
+ * @ena_dev: ENA communication layer struct
+ * @ind_tbl: indirection table
+ *
+ * Retrieve the RSS indirection table from the device.
+ *
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
+
+/* ena_com_allocate_host_info - Allocate host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_allocate_debug_area - Allocate debug area.
+ * @ena_dev: ENA communication layer struct
+ * @debug_area_size - debug area size.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size);
+
+/* ena_com_delete_debug_area - Free the debug area resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated debug area.
+ */
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_host_info - Free the host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated host info.
+ */
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_host_attributes - Update the device with the host
+ * attributes (debug area and host info) base address.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_cq - Create io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Create IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_destroy_io_cq - Destroy io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Destroy IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_execute_admin_command - Execute admin command
+ * @admin_queue: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @cmd_completion: command completion return value.
+ * @cmd_comp_size: command completion size.
+ *
+ * Submit an admin command and then wait until the device returns a
+ * completion.
+ * The completion will be copied into cmd_comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *cmd_comp,
+ size_t cmd_comp_size);
+
+/* ena_com_init_interrupt_moderation - Init interrupt moderation
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
+ * capability is supported by the device.
+ *
+ * @return - supported or not.
+ */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
+ * moderation table back to the default parameters.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+
+/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ * @tx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
+
+/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ * @rx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
+
+/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+
+/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
+ * moderation table.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry value
+ *
+ * Update a single entry in the interrupt moderation table.
+ */
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry to fill.
+ *
+ * Initialize the entry according to the adaptive interrupt moderation table.
+ */
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->adaptive_coalescing;
+}
+
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = true;
+}
+
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = false;
+}
+
+/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
+ * @ena_dev: ENA communication layer struct
+ * @pkts: Number of packets since the last update
+ * @bytes: Number of bytes received since the last update.
+ * @smoothed_interval: Returned interval
+ * @moder_tbl_idx: Current table level as input, updated to the new level on
+ * return.
+ */
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
+{
+ enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
+ struct ena_intr_moder_entry *curr_moder_entry;
+ struct ena_intr_moder_entry *pred_moder_entry;
+ struct ena_intr_moder_entry *new_moder_entry;
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int interval;
+
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ if (!pkts || !bytes)
+ /* Tx interrupt, or spurious interrupt,
+		 * in both cases we just use the same delay values
+ */
+ return;
+
+ curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
+ if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
+ pr_err("Wrong moderation index %u\n", curr_moder_idx);
+ return;
+ }
+
+ curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+ new_moder_idx = curr_moder_idx;
+
+ if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+ if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ } else {
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
+
+ if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+ (bytes <= pred_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
+ else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval)) {
+ if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ }
+ }
+ new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+ interval = new_moder_entry->intr_moder_interval;
+ *smoothed_interval = (
+ (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+ ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+ 10;
+
+ *moder_tbl_idx = new_moder_idx;
+}
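+
+/* Worked example of the smoothing above (new weight 4, old weight 6, the +5
+ * rounds the integer division): with a previous smoothed interval of 32 usec
+ * and a new table interval of 80 usec, the result is
+ * (80 * 4 + 6 * 32 + 5) / 10 = 51 usec.
+ */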
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unmask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+ u32 rx_delay_interval,
+ u32 tx_delay_interval,
+ bool unmask)
+{
+ intr_reg->intr_control = 0;
+ intr_reg->intr_control |= rx_delay_interval &
+ ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+ intr_reg->intr_control |=
+ (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+ if (unmask)
+ intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
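+
+/* Usage sketch (illustrative): at the end of an Rx NAPI poll a driver could
+ * prepare the register and write it to the queue's unmask register; the
+ * variable names below are hypothetical:
+ *
+ *	struct ena_eth_io_intr_reg intr_reg;
+ *
+ *	ena_com_update_intr_reg(&intr_reg, smoothed_interval,
+ *				tx_interval, true);
+ *	writel(intr_reg.intr_control, io_cq->unmask_reg);
+ */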
+
+#endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
new file mode 100644
index 000000000000..bb8d73676eab
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+ u32 mem_addr_low;
+
+ u16 mem_addr_high;
+
+ /* MBZ */
+ u16 reserved16;
+};
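+
+/* A 64-bit DMA address is expected to be split across this struct, e.g.
+ * (illustrative sketch using the kernel's lower_32_bits()/upper_32_bits()
+ * helpers):
+ *
+ * ena_addr->mem_addr_low = lower_32_bits(dma_addr);
+ * ena_addr->mem_addr_high = (u16)upper_32_bits(dma_addr);
+ *
+ * Address bits above 47 must be zero, since the device only handles
+ * 48-bit addresses.
+ */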
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
new file mode 100644
index 000000000000..539c536464a5
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ena_eth_com.h"
+
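+/* Completion queues use a phase (generation) bit rather than a produced
+ * index: the device writes each completion descriptor with the phase it
+ * currently owns, while the driver tracks the phase it expects in
+ * io_cq->phase. A descriptor whose phase bit differs from the expected
+ * phase has not been written yet, so ena_com_get_next_rx_cdesc() below
+ * returns NULL. The expected phase flips each time the head wraps around
+ * the queue (see ena_com_cq_inc_head()).
+ */
+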
+static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 expected_phase, head_masked;
+ u16 desc_phase;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+ desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+ return NULL;
+
+ return cdesc;
+}
+
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked;
+ u32 offset;
+
+ tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+
+ offset = tail_masked * io_sq->desc_entry_size;
+
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
+}
+
+static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u32 offset = tail_masked * io_sq->desc_entry_size;
+
+ /* Nothing to copy to the device if this queue isn't an LLQ */
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return;
+
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
+ io_sq->desc_entry_size);
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
+}
+
+static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
+ u8 *head_src, u16 header_len)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u8 __iomem *dev_head_addr =
+ io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (unlikely(!io_sq->header_addr)) {
+ pr_err("Push buffer header ptr is NULL\n");
+ return -EINVAL;
+ }
+
+ memcpy_toio(dev_head_addr, head_src, header_len);
+
+ return 0;
+}
+
+static inline struct ena_eth_io_rx_cdesc_base *
+ ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+{
+ idx &= (io_cq->q_depth - 1);
+ return (struct ena_eth_io_rx_cdesc_base *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
+}
+
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 count = 0, head_masked;
+ u32 last = 0;
+
+ do {
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (!cdesc)
+ break;
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+ last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+ if (last) {
+ *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+ count += io_cq->cur_rx_pkt_cdesc_count;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+
+ io_cq->cur_rx_pkt_cdesc_count = 0;
+ io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+
+ pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
+ } else {
+ io_cq->cur_rx_pkt_cdesc_count += count;
+ count = 0;
+ }
+
+ return count;
+}
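+
+/* A packet spanning multiple completion descriptors may also span several
+ * calls to this function: until a descriptor with the "last" bit is seen,
+ * the partial count is parked in io_cq->cur_rx_pkt_cdesc_count and zero is
+ * returned, so callers only ever consume whole packets.
+ */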
+
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ int rc;
+
+ if (ena_tx_ctx->meta_valid) {
+ rc = memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ if (unlikely(rc != 0))
+ return true;
+ }
+
+ return false;
+}
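+
+/* The TX meta descriptor carries the TSO/checksum offload parameters.
+ * ena_com_prepare_tx() only emits it when the comparison above shows it
+ * differs from the copy cached in io_sq->cached_tx_meta, saving one
+ * descriptor per packet in the common back-to-back case.
+ */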
+
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ meta_desc = get_sq_desc(io_sq);
+ memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+
+ /* bits 0-9 of the mss */
+ meta_desc->word2 |= (ena_meta->mss <<
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ /* bits 10-13 of the mss */
+ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+
+ /* Extended meta desc */
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ meta_desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->word2 |= ena_meta->l3_hdr_len &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+ meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+
+ meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
+ /* Cache the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+}
+
+static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+ struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ ena_rx_ctx->l3_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ ena_rx_ctx->l4_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->frag =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+
+ pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+}
+
+/*****************************************************************************/
+/***************************** API **********************************/
+/*****************************************************************************/
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc)
+{
+ struct ena_eth_io_tx_desc *desc = NULL;
+ struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+ void *push_header = ena_tx_ctx->push_header;
+ u16 header_len = ena_tx_ctx->header_len;
+ u16 num_bufs = ena_tx_ctx->num_bufs;
+ int total_desc, i, rc;
+ bool have_meta;
+ u64 addr_hi;
+
+ WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
+
+ /* num_bufs + 1 for a potential meta desc */
+ if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+ pr_err("Not enough space in the tx queue\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ pr_err("header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
+ return -EINVAL;
+ }
+
+ /* start with pushing the header (if needed) */
+ rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(rc))
+ return rc;
+
+ have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+ ena_tx_ctx);
+ if (have_meta)
+ ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+ /* If the caller doesn't want to send packets */
+ if (unlikely(!num_bufs && !header_len)) {
+ *nb_hw_desc = have_meta ? 1 : 0;
+ return 0;
+ }
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ /* Set first desc when we don't have meta descriptor */
+ if (!have_meta)
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+ desc->buff_addr_hi_hdr_sz |= (header_len <<
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+ /* Bits 0-9 */
+ desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+ desc->meta_ctrl |= (ena_tx_ctx->df <<
+ ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+ ENA_ETH_IO_TX_DESC_DF_MASK;
+
+ /* Bits 10-15 */
+ desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+ if (ena_tx_ctx->meta_valid) {
+ desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+ ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+ ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ /* The first desc shares the same desc as the header */
+ if (likely(i != 0)) {
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ }
+
+ desc->len_ctrl |= ena_bufs->len &
+ ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+ addr_hi = ((ena_bufs->paddr &
+ GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ desc->buff_addr_lo = (u32)ena_bufs->paddr;
+ desc->buff_addr_hi_hdr_sz |= addr_hi &
+ ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+ ena_bufs++;
+ }
+
+ /* set the last desc indicator */
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+
+ ena_com_sq_update_tail(io_sq);
+
+ total_desc = max_t(u16, num_bufs, 1);
+ total_desc += have_meta ? 1 : 0;
+
+ *nb_hw_desc = total_desc;
+ return 0;
+}
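+
+/* A typical TX flow (illustrative sketch, not a caller from this patch):
+ *
+ * rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
+ * if (likely(!rc))
+ * ena_com_write_sq_doorbell(io_sq);
+ *
+ * ena_com_write_sq_doorbell() (see ena_eth_com.h) makes the posted
+ * descriptors visible to the device.
+ */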
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+ struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 cdesc_idx = 0;
+ u16 nb_hw_desc;
+ u16 i;
+
+ WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+
+ nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ if (nb_hw_desc == 0) {
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+ }
+
+ pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ nb_hw_desc);
+
+ if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+ ena_rx_ctx->max_bufs);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+ ena_buf->len = cdesc->length;
+ ena_buf->req_id = cdesc->req_id;
+ ena_buf++;
+ }
+
+ /* Update SQ head ptr */
+ io_sq->next_to_comp += nb_hw_desc;
+
+ pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+}
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id)
+{
+ struct ena_eth_io_rx_desc *desc;
+
+ WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+
+ if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ return -ENOSPC;
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+ desc->length = ena_buf->len;
+
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+ desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+ desc->req_id = req_id;
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ ena_com_sq_update_tail(io_sq);
+
+ return 0;
+}
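+
+/* RX refill sketch (hypothetical caller, for illustration only):
+ *
+ * for (i = 0; i < budget; i++) {
+ * ...map a fresh buffer into ena_buf and pick a free req_id...
+ * if (ena_com_add_single_rx_desc(io_sq, &ena_buf, req_id))
+ * break;
+ * }
+ * ena_com_write_sq_doorbell(io_sq);
+ */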
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor phase doesn't match the
+ * expected phase, the device hasn't updated this completion yet.
+ */
+ cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -EAGAIN;
+
+ ena_com_cq_inc_head(io_cq);
+
+ *req_id = cdesc->req_id;
+
+ return 0;
+}
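+
+/* TX cleanup sketch (hypothetical caller): poll until the device stops
+ * producing completions, then release the reclaimed descriptors:
+ *
+ * while (!ena_com_tx_comp_req_id_get(io_cq, &req_id)) {
+ * ...free the skb tracked under req_id...
+ * ena_com_comp_ack(io_sq, descs_used_by_req_id);
+ * }
+ * ena_com_update_dev_comp_head(io_cq);
+ *
+ * ena_com_comp_ack() and ena_com_update_dev_comp_head() are defined in
+ * ena_eth_com.h.
+ */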
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
new file mode 100644
index 000000000000..bb53c3a4f8e9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_ETH_COM_H_
+#define ENA_ETH_COM_H_
+
+#include "ena_com.h"
+
+/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
+#define ENA_COMP_HEAD_THRESH 4
+
+struct ena_com_tx_ctx {
+ struct ena_com_tx_meta ena_meta;
+ struct ena_com_buf *ena_bufs;
+ /* For LLQ, header buffer - pushed to the device mem space */
+ void *push_header;
+
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ u16 num_bufs;
+ u16 req_id;
+ /* For regular queue, indicate the size of the header
+ * For LLQ, indicate the size of the pushed buffer
+ */
+ u16 header_len;
+
+ u8 meta_valid;
+ u8 tso_enable;
+ u8 l3_csum_enable;
+ u8 l4_csum_enable;
+ u8 l4_csum_partial;
+ u8 df; /* Don't fragment */
+};
+
+struct ena_com_rx_ctx {
+ struct ena_com_rx_buf_info *ena_bufs;
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ bool l3_csum_err;
+ bool l4_csum_err;
+ /* fragmented packet */
+ bool frag;
+ u32 hash;
+ u16 descs;
+ int max_bufs;
+};
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc);
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx);
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id);
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+
+static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
+ struct ena_eth_io_intr_reg *intr_reg)
+{
+ writel(intr_reg->intr_control, io_cq->unmask_reg);
+}
+
+static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+{
+ u16 tail, next_to_comp, cnt;
+
+ next_to_comp = io_sq->next_to_comp;
+ tail = io_sq->tail;
+ cnt = tail - next_to_comp;
+
+ return io_sq->q_depth - 1 - cnt;
+}
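+
+/* tail and next_to_comp are free-running u16 counters, so the subtraction
+ * above yields the number of in-flight descriptors even across
+ * wrap-around. At most q_depth - 1 free entries are reported, keeping one
+ * slot in reserve.
+ */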
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail;
+
+ tail = io_sq->tail;
+
+ pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
+
+ writel(tail, io_sq->db_addr);
+
+ return 0;
+}
+
+static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
+{
+ u16 unreported_comp, head;
+ bool need_update;
+
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (io_cq->cq_head_db_reg && need_update) {
+ pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ writel(head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
+
+ return 0;
+}
+
+static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
+ u8 numa_node)
+{
+ struct ena_eth_io_numa_node_cfg_reg numa_cfg;
+
+ if (!io_cq->numa_node_cfg_reg)
+ return;
+
+ numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
+ | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+
+ writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+}
+
+static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
+{
+ io_sq->next_to_comp += elem;
+}
+
+#endif /* ENA_ETH_COM_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
new file mode 100644
index 000000000000..f320c58793a5
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+struct ena_eth_io_tx_desc {
+ /* 15:0 : length - Buffer length in bytes, must
+ * include any packet trailers that the ENA is supposed
+ * to update, such as End-to-End CRC, Authentication GMAC,
+ * etc. This length must not include the
+ * 'Push_Buffer' length, nor the 4 bytes added at the
+ * end for the 802.3 Ethernet FCS
+ * 21:16 : req_id_hi - Request ID[15:10]
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBZ
+ * 24 : phase
+ * 25 : reserved1 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ u32 len_ctrl;
+
+ /* 3:0 : l3_proto_idx - L3 protocol. This field is
+ * required when l3_csum_en, l3_csum or tso_en is set.
+ * 4 : DF - IPv4 DF, must be 0 if the packet is IPv4 and
+ * the DF flag of the IPv4 header is 0. Otherwise it
+ * must be set to 1
+ * 6:5 : reserved5
+ * 7 : tso_en - Enable TSO, for TCP only.
+ * 12:8 : l4_proto_idx - L4 protocol. This field needs
+ * to be set when l4_csum_en or tso_en is set.
+ * 13 : l3_csum_en - enable IPv4 header checksum.
+ * 14 : l4_csum_en - enable TCP/UDP checksum.
+ * 15 : ethernet_fcs_dis - when set, the controller
+ * will not append the 802.3 Ethernet Frame Check
+ * Sequence to the packet
+ * 16 : reserved16
+ * 17 : l4_csum_partial - L4 partial checksum. When
+ * set to 0, the ENA calculates the L4 checksum,
+ * where the Destination Address required for the
+ * TCP/UDP pseudo-header is taken from the actual
+ * packet L3 header. When set to 1, the ENA doesn't
+ * calculate the pseudo-header checksum; the checksum
+ * field of the L4 header is used instead. When TSO is
+ * enabled, the checksum of the pseudo-header must not
+ * include the TCP length field. L4 partial checksum
+ * should be used for IPv6 packets that contain
+ * Routing Headers.
+ * 20:18 : reserved18 - MBZ
+ * 21 : reserved21 - MBZ
+ * 31:22 : req_id_lo - Request ID[9:0]
+ */
+ u32 meta_ctrl;
+
+ u32 buff_addr_lo;
+
+ /* address high and header size
+ * 15:0 : addr_hi - Buffer Pointer[47:32]
+ * 23:16 : reserved16_w2
+ * 31:24 : header_length - Header length. For Low
+ * Latency Queues, this field indicates the number
+ * of bytes written to the headers' memory. For
+ * normal queues, if the packet is TCP or UDP and
+ * longer than max_header_size, then this field
+ * should be set to the sum of the L4 header offset
+ * and L4 header size (without options); otherwise
+ * it should be set to 0. For both modes, this field
+ * must not exceed the max_header_size.
+ * max_header_size value is reported by the Max
+ * Queues Feature descriptor
+ */
+ u32 buff_addr_hi_hdr_sz;
+};
+
+struct ena_eth_io_tx_meta_desc {
+ /* 9:0 : req_id_lo - Request ID[9:0]
+ * 11:10 : reserved10 - MBZ
+ * 12 : reserved12 - MBZ
+ * 13 : reserved13 - MBZ
+ * 14 : ext_valid - if set, the offset fields in Word2
+ * are valid, as are MSS High in Word 0 and bits
+ * [31:24] in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
+ * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+ * Extended Metadata Descriptor
+ * 21 : meta_store - Store extended metadata in queue
+ * cache
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBO
+ * 24 : phase
+ * 25 : reserved25 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ u32 len_ctrl;
+
+ /* 5:0 : req_id_hi
+ * 31:6 : reserved6 - MBZ
+ */
+ u32 word1;
+
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
+ * 21:16 : l4_hdr_len_in_words - counts the L4 header
+ * length in words. There is an explicit assumption
+ * that the L4 header appears right after the L3
+ * header and the L4 offset is based on
+ * l3_hdr_off + l3_hdr_len
+ * 31:22 : mss_lo
+ */
+ u32 word2;
+
+ u32 reserved;
+};
+
+struct ena_eth_io_tx_cdesc {
+ /* Request ID[15:0] */
+ u16 req_id;
+
+ u8 status;
+
+ /* flags
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ u8 flags;
+
+ u16 sub_qid;
+
+ u16 sq_head_idx;
+};
+
+struct ena_eth_io_rx_desc {
+ /* In bytes. 0 means 64KB */
+ u16 length;
+
+ /* MBZ */
+ u8 reserved2;
+
+ /* 0 : phase
+ * 1 : reserved1 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req
+ * 5 : reserved5 - MBO
+ * 7:6 : reserved6 - MBZ
+ */
+ u8 ctrl;
+
+ u16 req_id;
+
+ /* MBZ */
+ u16 reserved6;
+
+ u32 buff_addr_lo;
+
+ u16 buff_addr_hi;
+
+ /* MBZ */
+ u16 reserved16_w3;
+};
+
+/* 4-word format. Note: all Ethernet parsing information is valid only when
+ * last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
+ * 7 : reserved7 - MBZ
+ * 12:8 : l4_proto_idx
+ * 13 : l3_csum_err - when set, either an L3
+ * checksum error was detected or the controller
+ * didn't validate the checksum. This bit is valid
+ * only when l3_proto_idx indicates an IPv4 packet
+ * 14 : l4_csum_err - when set, either an L4
+ * checksum error was detected or the controller
+ * didn't validate the checksum. This bit is valid
+ * only when l4_proto_idx indicates a TCP/UDP packet
+ * and ipv4_frag is not set
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+ * 23:16 : reserved16
+ * 24 : phase
+ * 25 : l3_csum2 - second checksum engine result
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 29:28 : reserved28
+ * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+ * Descriptor was used
+ * 31 : reserved31
+ */
+ u32 status;
+
+ u16 length;
+
+ u16 req_id;
+
+ /* 32-bit hash result */
+ u32 hash;
+
+ u16 sub_qid;
+
+ u16 reserved;
+};
+
+/* 8-word format */
+struct ena_eth_io_rx_cdesc_ext {
+ struct ena_eth_io_rx_cdesc_base base;
+
+ u32 buff_addr_lo;
+
+ u16 buff_addr_hi;
+
+ u16 reserved16;
+
+ u32 reserved_w6;
+
+ u32 reserved_w7;
+};
+
+struct ena_eth_io_intr_reg {
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
+ * 31 : reserved
+ */
+ u32 intr_control;
+};
+
+struct ena_eth_io_numa_node_cfg_reg {
+ /* 7:0 : numa
+ * 30:8 : reserved
+ * 31 : enabled
+ */
+ u32 numa_cfg;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+/* numa_node_cfg_reg */
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+
+#endif /*_ENA_ETH_IO_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
new file mode 100644
index 000000000000..67b2338f8fb3
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -0,0 +1,895 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "ena_netdev.h"
+
+struct ena_stats {
+ char name[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define ENA_STAT_ENA_COM_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+}
+
+#define ENA_STAT_ENTRY(stat, stat_type) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+}
+
+#define ENA_STAT_RX_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, rx)
+
+#define ENA_STAT_TX_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, tx)
+
+#define ENA_STAT_GLOBAL_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, dev)
+
+static const struct ena_stats ena_stats_global_strings[] = {
+ ENA_STAT_GLOBAL_ENTRY(tx_timeout),
+ ENA_STAT_GLOBAL_ENTRY(io_suspend),
+ ENA_STAT_GLOBAL_ENTRY(io_resume),
+ ENA_STAT_GLOBAL_ENTRY(wd_expired),
+ ENA_STAT_GLOBAL_ENTRY(interface_up),
+ ENA_STAT_GLOBAL_ENTRY(interface_down),
+ ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+};
+
+static const struct ena_stats ena_stats_tx_strings[] = {
+ ENA_STAT_TX_ENTRY(cnt),
+ ENA_STAT_TX_ENTRY(bytes),
+ ENA_STAT_TX_ENTRY(queue_stop),
+ ENA_STAT_TX_ENTRY(queue_wakeup),
+ ENA_STAT_TX_ENTRY(dma_mapping_err),
+ ENA_STAT_TX_ENTRY(linearize),
+ ENA_STAT_TX_ENTRY(linearize_failed),
+ ENA_STAT_TX_ENTRY(napi_comp),
+ ENA_STAT_TX_ENTRY(tx_poll),
+ ENA_STAT_TX_ENTRY(doorbells),
+ ENA_STAT_TX_ENTRY(prepare_ctx_err),
+ ENA_STAT_TX_ENTRY(missing_tx_comp),
+ ENA_STAT_TX_ENTRY(bad_req_id),
+};
+
+static const struct ena_stats ena_stats_rx_strings[] = {
+ ENA_STAT_RX_ENTRY(cnt),
+ ENA_STAT_RX_ENTRY(bytes),
+ ENA_STAT_RX_ENTRY(refil_partial),
+ ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(page_alloc_fail),
+ ENA_STAT_RX_ENTRY(skb_alloc_fail),
+ ENA_STAT_RX_ENTRY(dma_mapping_err),
+ ENA_STAT_RX_ENTRY(bad_desc_num),
+ ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+};
+
+static const struct ena_stats ena_stats_ena_com_strings[] = {
+ ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
+ ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
+ ENA_STAT_ENA_COM_ENTRY(completed_cmd),
+ ENA_STAT_ENA_COM_ENTRY(out_of_space),
+ ENA_STAT_ENA_COM_ENTRY(no_completion),
+};
+
+#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+
+static void ena_safe_update_stat(u64 *src, u64 *dst,
+ struct u64_stats_sync *syncp)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(syncp);
+ *(dst) = *src;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
+}
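+
+/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop gives
+ * a tear-free read of a 64-bit counter updated by the datapath under the
+ * same u64_stats_sync; on 64-bit architectures it reduces to a plain
+ * load.
+ */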
+
+static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
+{
+ const struct ena_stats *ena_stats;
+ struct ena_ring *ring;
+
+ u64 *ptr;
+ int i, j;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* Tx stats */
+ ring = &adapter->tx_ring[i];
+
+ for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
+ ena_stats = &ena_stats_tx_strings[j];
+
+ ptr = (u64 *)((uintptr_t)&ring->tx_stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ }
+
+ /* Rx stats */
+ ring = &adapter->rx_ring[i];
+
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ ptr = (u64 *)((uintptr_t)&ring->rx_stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ }
+ }
+}
+
+static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
+{
+ const struct ena_stats *ena_stats;
+ u32 *ptr;
+ int i;
+
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
+ ena_stats = &ena_stats_ena_com_strings[i];
+
+ ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ *(*data)++ = *ptr;
+ }
+}
+
+static void ena_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
+ ena_stats = &ena_stats_global_strings[i];
+
+ ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ ena_safe_update_stat(ptr, data++, &adapter->syncp);
+ }
+
+ ena_queue_stats(adapter, &data);
+ ena_dev_admin_queue_stats(adapter, &data);
+}
+
+int ena_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
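+
+/* The count returned above must match, in size and order, what
+ * ena_get_strings() and ena_get_ethtool_stats() emit: global stats first,
+ * then per-queue Tx/Rx stats, then admin queue stats.
+ */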
+
+static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
+{
+ const struct ena_stats *ena_stats;
+ int i, j;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* Tx stats */
+ for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
+ ena_stats = &ena_stats_tx_strings[j];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "queue_%u_tx_%s", i, ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
+ /* Rx stats */
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "queue_%u_rx_%s", i, ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void ena_com_dev_strings(u8 **data)
+{
+ const struct ena_stats *ena_stats;
+ int i;
+
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
+ ena_stats = &ena_stats_ena_com_strings[i];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "ena_admin_q_%s", ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
+}
+
+static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
+ ena_stats = &ena_stats_global_strings[i];
+
+ memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ ena_queue_strings(adapter, &data);
+ ena_com_dev_strings(&data);
+}
+
+static int ena_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_admin_get_feature_link_desc *link;
+ struct ena_admin_get_feat_resp feat_resp;
+ int rc;
+
+ rc = ena_com_get_link_params(ena_dev, &feat_resp);
+ if (rc)
+ return rc;
+
+ link = &feat_resp.u.link;
+ link_ksettings->base.speed = link->speed;
+
+ if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
+ }
+
+ link_ksettings->base.autoneg =
+ (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ link_ksettings->base.duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int ena_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ena_adapter *adapter = netdev_priv(net_dev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_intr_moder_entry intr_moder_entry;
+
+ if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+ /* the device doesn't support interrupt moderation */
+ return -EOPNOTSUPP;
+ }
+ coalesce->tx_coalesce_usecs =
+ ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) /
+ ena_dev->intr_delay_resolution;
+ if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) {
+ coalesce->rx_coalesce_usecs =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+ / ena_dev->intr_delay_resolution;
+ } else {
+ ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);
+ coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval;
+ coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval;
+
+ ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);
+ coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval;
+ coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval;
+
+ ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);
+ coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval;
+ coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval;
+ }
+ coalesce->use_adaptive_rx_coalesce =
+ ena_com_get_adaptive_moderation_enabled(ena_dev);
+
+ return 0;
+}
+
+static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+{
+ unsigned int val;
+ int i;
+
+ val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
+
+ for (i = 0; i < adapter->num_queues; i++)
+ adapter->tx_ring[i].smoothed_interval = val;
+}
+
+static int ena_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ena_adapter *adapter = netdev_priv(net_dev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_intr_moder_entry intr_moder_entry;
+ int rc;
+
+ if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+ /* the device doesn't support interrupt moderation */
+ return -EOPNOTSUPP;
+ }
+
+ if (coalesce->rx_coalesce_usecs_irq ||
+ coalesce->rx_max_coalesced_frames_irq ||
+ coalesce->tx_coalesce_usecs_irq ||
+ coalesce->tx_max_coalesced_frames ||
+ coalesce->tx_max_coalesced_frames_irq ||
+ coalesce->stats_block_coalesce_usecs ||
+ coalesce->use_adaptive_tx_coalesce ||
+ coalesce->pkt_rate_low ||
+ coalesce->tx_coalesce_usecs_low ||
+ coalesce->tx_max_coalesced_frames_low ||
+ coalesce->pkt_rate_high ||
+ coalesce->tx_coalesce_usecs_high ||
+ coalesce->tx_max_coalesced_frames_high ||
+ coalesce->rate_sample_interval)
+ return -EINVAL;
+
+ rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
+ coalesce->tx_coalesce_usecs);
+ if (rc)
+ return rc;
+
+ ena_update_tx_rings_intr_moderation(adapter);
+
+ if (ena_com_get_adaptive_moderation_enabled(ena_dev)) {
+ if (!coalesce->use_adaptive_rx_coalesce) {
+ ena_com_disable_adaptive_moderation(ena_dev);
+ rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
+ coalesce->rx_coalesce_usecs);
+ return rc;
+ }
+ } else { /* was in non-adaptive mode */
+ if (coalesce->use_adaptive_rx_coalesce) {
+ ena_com_enable_adaptive_moderation(ena_dev);
+ } else {
+ rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
+ coalesce->rx_coalesce_usecs);
+ return rc;
+ }
+ }
+
+ intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low;
+ intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low;
+ intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
+ ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);
+
+ intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs;
+ intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames;
+ intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
+ ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);
+
+ intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high;
+ intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high;
+ intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
+ ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);
+
+ return 0;
+}
+
+static u32 ena_get_msglevel(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void ena_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = value;
+}
+
+static void ena_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+}
+
+static void ena_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_ring *tx_ring = &adapter->tx_ring[0];
+ struct ena_ring *rx_ring = &adapter->rx_ring[0];
+
+ ring->rx_max_pending = rx_ring->ring_size;
+ ring->tx_max_pending = tx_ring->ring_size;
+ ring->rx_pending = rx_ring->ring_size;
+ ring->tx_pending = tx_ring->ring_size;
+}
+
+static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
+{
+ u32 data = 0;
+
+ if (hash_fields & ENA_ADMIN_RSS_L2_DA)
+ data |= RXH_L2DA;
+
+ if (hash_fields & ENA_ADMIN_RSS_L3_DA)
+ data |= RXH_IP_DST;
+
+ if (hash_fields & ENA_ADMIN_RSS_L3_SA)
+ data |= RXH_IP_SRC;
+
+ if (hash_fields & ENA_ADMIN_RSS_L4_DP)
+ data |= RXH_L4_B_2_3;
+
+ if (hash_fields & ENA_ADMIN_RSS_L4_SP)
+ data |= RXH_L4_B_0_1;
+
+ return data;
+}
+
+static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
+{
+ u16 data = 0;
+
+ if (hash_fields & RXH_L2DA)
+ data |= ENA_ADMIN_RSS_L2_DA;
+
+ if (hash_fields & RXH_IP_DST)
+ data |= ENA_ADMIN_RSS_L3_DA;
+
+ if (hash_fields & RXH_IP_SRC)
+ data |= ENA_ADMIN_RSS_L3_SA;
+
+ if (hash_fields & RXH_L4_B_2_3)
+ data |= ENA_ADMIN_RSS_L4_DP;
+
+ if (hash_fields & RXH_L4_B_0_1)
+ data |= ENA_ADMIN_RSS_L4_SP;
+
+ return data;
+}
+
+static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ enum ena_admin_flow_hash_proto proto;
+ u16 hash_fields;
+ int rc;
+
+ cmd->data = 0;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_TCP4;
+ break;
+ case UDP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_UDP4;
+ break;
+ case TCP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_TCP6;
+ break;
+ case UDP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_UDP6;
+ break;
+ case IPV4_FLOW:
+ proto = ENA_ADMIN_RSS_IP4;
+ break;
+ case IPV6_FLOW:
+ proto = ENA_ADMIN_RSS_IP6;
+ break;
+ case ETHER_FLOW:
+ proto = ENA_ADMIN_RSS_NOT_IP;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
+ if (rc) {
+ /* If the device doesn't have permission, return unsupported */
+ if (rc == -EPERM)
+ rc = -EOPNOTSUPP;
+ return rc;
+ }
+
+ cmd->data = ena_flow_hash_to_flow_type(hash_fields);
+
+ return 0;
+}
+
+static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ enum ena_admin_flow_hash_proto proto;
+ u16 hash_fields;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_TCP4;
+ break;
+ case UDP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_UDP4;
+ break;
+ case TCP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_TCP6;
+ break;
+ case UDP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_UDP6;
+ break;
+ case IPV4_FLOW:
+ proto = ENA_ADMIN_RSS_IP4;
+ break;
+ case IPV6_FLOW:
+ proto = ENA_ADMIN_RSS_IP6;
+ break;
+ case ETHER_FLOW:
+ proto = ENA_ADMIN_RSS_NOT_IP;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+
+ hash_fields = ena_flow_data_to_flow_hash(cmd->data);
+
+ return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
+}
+
+static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ rc = ena_set_rss_hash(adapter->ena_dev, info);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ case ETHTOOL_SRXCLSRLINS:
+ default:
+ netif_err(adapter, drv, netdev,
+ "Command parameter %d is not supported\n", info->cmd);
+ rc = -EOPNOTSUPP;
+ }
+
+ return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+}
+
+static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_queues;
+ rc = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ rc = ena_get_rss_hash(adapter->ena_dev, info);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ case ETHTOOL_GRXCLSRULE:
+ case ETHTOOL_GRXCLSRLALL:
+ default:
+ netif_err(adapter, drv, netdev,
+ "Command parameter %d is not supported\n", info->cmd);
+ rc = -EOPNOTSUPP;
+ }
+
+ return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+}
+
+static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return ENA_RX_RSS_TABLE_SIZE;
+}
+
+static u32 ena_get_rxfh_key_size(struct net_device *netdev)
+{
+ return ENA_HASH_KEY_SIZE;
+}
+
+static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ enum ena_admin_hash_functions ena_func;
+ u8 func;
+ int rc;
+
+ rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+ if (rc)
+ return rc;
+
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+ if (rc)
+ return rc;
+
+ switch (ena_func) {
+ case ENA_ADMIN_TOEPLITZ:
+ func = ETH_RSS_HASH_TOP;
+ break;
+ case ENA_ADMIN_CRC32:
+ func = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ netif_err(adapter, drv, netdev,
+ "Command parameter is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hfunc)
+ *hfunc = func;
+
+ return rc;
+}
+
+static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ enum ena_admin_hash_functions func;
+ int rc, i;
+
+ if (indir) {
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ ENA_IO_RXQ_IDX(indir[i]),
+ i);
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, netdev,
+ "Cannot fill indirect table (index is too large)\n");
+ return rc;
+ }
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (rc) {
+ netif_err(adapter, drv, netdev,
+ "Cannot set indirect table\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ }
+
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ func = ENA_ADMIN_TOEPLITZ;
+ break;
+ case ETH_RSS_HASH_XOR:
+ func = ENA_ADMIN_CRC32;
+ break;
+ default:
+ netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
+ hfunc);
+ return -EOPNOTSUPP;
+ }
+
+ if (key) {
+ rc = ena_com_fill_hash_function(ena_dev, func, key,
+ ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, netdev, "Cannot fill key\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ }
+
+ return 0;
+}
+
+static void ena_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
+ channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = adapter->num_queues;
+ channels->tx_count = adapter->num_queues;
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
+static int ena_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = adapter->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ena_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+ u32 len;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ len = *(u32 *)data;
+ if (len > adapter->netdev->mtu) {
+ ret = -EINVAL;
+ break;
+ }
+ adapter->rx_copybreak = len;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops ena_ethtool_ops = {
+ .get_link_ksettings = ena_get_link_ksettings,
+ .get_drvinfo = ena_get_drvinfo,
+ .get_msglevel = ena_get_msglevel,
+ .set_msglevel = ena_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = ena_get_coalesce,
+ .set_coalesce = ena_set_coalesce,
+ .get_ringparam = ena_get_ringparam,
+ .get_sset_count = ena_get_sset_count,
+ .get_strings = ena_get_strings,
+ .get_ethtool_stats = ena_get_ethtool_stats,
+ .get_rxnfc = ena_get_rxnfc,
+ .set_rxnfc = ena_set_rxnfc,
+ .get_rxfh_indir_size = ena_get_rxfh_indir_size,
+ .get_rxfh_key_size = ena_get_rxfh_key_size,
+ .get_rxfh = ena_get_rxfh,
+ .set_rxfh = ena_set_rxfh,
+ .get_channels = ena_get_channels,
+ .get_tunable = ena_get_tunable,
+ .set_tunable = ena_set_tunable,
+};
+
+void ena_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ena_ethtool_ops;
+}
+
+static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 *strings_buf;
+ u64 *data_buf;
+ int strings_num;
+ int i, rc;
+
+ strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
+ if (strings_num <= 0) {
+ netif_err(adapter, drv, netdev, "Can't get stats num\n");
+ return;
+ }
+
+ strings_buf = devm_kzalloc(&adapter->pdev->dev,
+ strings_num * ETH_GSTRING_LEN,
+ GFP_ATOMIC);
+ if (!strings_buf) {
+ netif_err(adapter, drv, netdev,
+ "failed to alloc strings_buf\n");
+ return;
+ }
+
+ data_buf = devm_kzalloc(&adapter->pdev->dev,
+ strings_num * sizeof(u64),
+ GFP_ATOMIC);
+ if (!data_buf) {
+ netif_err(adapter, drv, netdev,
+ "failed to allocate data buf\n");
+ devm_kfree(&adapter->pdev->dev, strings_buf);
+ return;
+ }
+
+ ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
+ ena_get_ethtool_stats(netdev, NULL, data_buf);
+
+ /* If there is a buffer, dump stats, otherwise print them to dmesg */
+ if (buf)
+ for (i = 0; i < strings_num; i++) {
+ rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
+ "%s %llu\n",
+ strings_buf + i * ETH_GSTRING_LEN,
+ data_buf[i]);
+ buf += rc;
+ }
+ else
+ for (i = 0; i < strings_num; i++)
+ netif_err(adapter, drv, netdev, "%s: %llu\n",
+ strings_buf + i * ETH_GSTRING_LEN,
+ data_buf[i]);
+
+ devm_kfree(&adapter->pdev->dev, strings_buf);
+ devm_kfree(&adapter->pdev->dev, data_buf);
+}
+
+void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
+{
+ if (!buf)
+ return;
+
+ ena_dump_stats_ex(adapter, buf);
+}
+
+void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
+{
+ ena_dump_stats_ex(adapter, NULL);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
new file mode 100644
index 000000000000..bfeaec5bd7b9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -0,0 +1,3272 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif /* CONFIG_RFS_ACCEL */
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/numa.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+
+#include "ena_netdev.h"
+#include "ena_pci_id_tbl.h"
+
+static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
+MODULE_DESCRIPTION(DEVICE_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (5 * HZ)
+
+#define ENA_NAPI_BUDGET 64
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
+ NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static struct ena_aenq_handlers aenq_handlers;
+
+static struct workqueue_struct *ena_wq;
+
+MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
+
+static int ena_rss_init_default(struct ena_adapter *adapter);
+
+static void ena_tx_timeout(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.tx_timeout++;
+ u64_stats_update_end(&adapter->syncp);
+
+ netif_err(adapter, tx_err, dev, "Transmit timeout\n");
+
+ /* Change the state of the device to trigger reset */
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+}
+
+static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ adapter->rx_ring[i].mtu = mtu;
+}
+
+static int ena_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int ret;
+
+ if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
+ netif_err(adapter, drv, dev,
+ "Invalid MTU setting. new_mtu: %d\n", new_mtu);
+
+ return -EINVAL;
+ }
+
+ ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
+ if (!ret) {
+ netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
+ update_rx_ring_mtu(adapter, new_mtu);
+ dev->mtu = new_mtu;
+ } else {
+ netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
+ new_mtu);
+ }
+
+ return ret;
+}
+
+static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
+{
+#ifdef CONFIG_RFS_ACCEL
+ u32 i;
+ int rc;
+
+ adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
+ if (!adapter->netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ for (i = 0; i < adapter->num_queues; i++) {
+ int irq_idx = ENA_IO_IRQ_IDX(i);
+
+ rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
+ adapter->msix_entries[irq_idx].vector);
+ if (rc) {
+ free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
+ adapter->netdev->rx_cpu_rmap = NULL;
+ return rc;
+ }
+ }
+#endif /* CONFIG_RFS_ACCEL */
+ return 0;
+}
+
+static void ena_init_io_rings_common(struct ena_adapter *adapter,
+ struct ena_ring *ring, u16 qid)
+{
+ ring->qid = qid;
+ ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->napi = &adapter->ena_napi[qid].napi;
+ ring->adapter = adapter;
+ ring->ena_dev = adapter->ena_dev;
+ ring->per_napi_packets = 0;
+ ring->per_napi_bytes = 0;
+ ring->cpu = 0;
+ u64_stats_init(&ring->syncp);
+}
+
+static void ena_init_io_rings(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev;
+ struct ena_ring *txr, *rxr;
+ int i;
+
+ ena_dev = adapter->ena_dev;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ txr = &adapter->tx_ring[i];
+ rxr = &adapter->rx_ring[i];
+
+ /* TX/RX common ring state */
+ ena_init_io_rings_common(adapter, txr, i);
+ ena_init_io_rings_common(adapter, rxr, i);
+
+ /* TX specific ring state */
+ txr->ring_size = adapter->tx_ring_size;
+ txr->tx_max_header_size = ena_dev->tx_max_header_size;
+ txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
+ txr->sgl_size = adapter->max_tx_sgl_size;
+ txr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
+
+ /* RX specific ring state */
+ rxr->ring_size = adapter->rx_ring_size;
+ rxr->rx_copybreak = adapter->rx_copybreak;
+ rxr->sgl_size = adapter->max_rx_sgl_size;
+ rxr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+ }
+}
+
+/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
+{
+ struct ena_ring *tx_ring = &adapter->tx_ring[qid];
+ struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
+ int size, i, node;
+
+ if (tx_ring->tx_buffer_info) {
+ netif_err(adapter, ifup,
+ adapter->netdev, "tx_buffer_info is not NULL");
+ return -EEXIST;
+ }
+
+ size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
+ node = cpu_to_node(ena_irq->cpu);
+
+ tx_ring->tx_buffer_info = vzalloc_node(size, node);
+ if (!tx_ring->tx_buffer_info) {
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ return -ENOMEM;
+ }
+
+ size = sizeof(u16) * tx_ring->ring_size;
+ tx_ring->free_tx_ids = vzalloc_node(size, node);
+ if (!tx_ring->free_tx_ids) {
+ tx_ring->free_tx_ids = vzalloc(size);
+ if (!tx_ring->free_tx_ids) {
+ vfree(tx_ring->tx_buffer_info);
+ return -ENOMEM;
+ }
+ }
+
+ /* Request id ring for TX out-of-order completions */
+ for (i = 0; i < tx_ring->ring_size; i++)
+ tx_ring->free_tx_ids[i] = i;
+
+ /* Reset tx statistics */
+ memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->cpu = ena_irq->cpu;
+ return 0;
+}
+
+/* ena_free_tx_resources - Free I/O Tx Resources per Queue
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all transmit software resources
+ */
+static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
+{
+ struct ena_ring *tx_ring = &adapter->tx_ring[qid];
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ vfree(tx_ring->free_tx_ids);
+ tx_ring->free_tx_ids = NULL;
+}
+
+/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
+ * @adapter: private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_setup_tx_resources(adapter, i);
+ if (rc)
+ goto err_setup_tx;
+ }
+
+ return 0;
+
+err_setup_tx:
+
+ netif_err(adapter, ifup, adapter->netdev,
+ "Tx queue %d: allocation failed\n", i);
+
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ena_free_tx_resources(adapter, i);
+ return rc;
+}
+
+/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_tx_resources(adapter, i);
+}
+
+/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ena_setup_rx_resources(struct ena_adapter *adapter,
+ u32 qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+ struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
+ int size, node;
+
+ if (rx_ring->rx_buffer_info) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "rx_buffer_info is not NULL");
+ return -EEXIST;
+ }
+
+ /* allocate an extra element so the rx path
+ * can always prefetch rx_info + 1
+ */
+ size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
+ node = cpu_to_node(ena_irq->cpu);
+
+ rx_ring->rx_buffer_info = vzalloc_node(size, node);
+ if (!rx_ring->rx_buffer_info) {
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ return -ENOMEM;
+ }
+
+ /* Reset rx statistics */
+ memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->cpu = ena_irq->cpu;
+
+ return 0;
+}
+
+/* ena_free_rx_resources - Free I/O Rx Resources
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all receive software resources
+ */
+static void ena_free_rx_resources(struct ena_adapter *adapter,
+ u32 qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+}
+
+/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_setup_rx_resources(adapter, i);
+ if (rc)
+ goto err_setup_rx;
+ }
+
+ return 0;
+
+err_setup_rx:
+
+ netif_err(adapter, ifup, adapter->netdev,
+ "Rx queue %d: allocation failed\n", i);
+
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ena_free_rx_resources(adapter, i);
+ return rc;
+}
+
+/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ */
+static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_rx_resources(adapter, i);
+}
+
+static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info, gfp_t gfp)
+{
+ struct ena_com_buf *ena_buf;
+ struct page *page;
+ dma_addr_t dma;
+
+ /* if the previously allocated page is still unused, reuse it */
+ if (unlikely(rx_info->page))
+ return 0;
+
+ page = alloc_page(gfp);
+ if (unlikely(!page)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.page_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ return -ENOMEM;
+ }
+
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.dma_mapping_err++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ __free_page(page);
+ return -EIO;
+ }
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "alloc page %p, rx_info %p\n", page, rx_info);
+
+ rx_info->page = page;
+ rx_info->page_offset = 0;
+ ena_buf = &rx_info->ena_buf;
+ ena_buf->paddr = dma;
+ ena_buf->len = PAGE_SIZE;
+
+ return 0;
+}
+
+static void ena_free_rx_page(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+ struct page *page = rx_info->page;
+ struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+
+ if (unlikely(!page)) {
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Trying to free unallocated buffer\n");
+ return;
+ }
+
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ __free_page(page);
+ rx_info->page = NULL;
+}
+
+static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
+{
+ u16 next_to_use;
+ u32 i;
+ int rc;
+
+ next_to_use = rx_ring->next_to_use;
+
+ for (i = 0; i < num; i++) {
+ struct ena_rx_buffer *rx_info =
+ &rx_ring->rx_buffer_info[next_to_use];
+
+ rc = ena_alloc_rx_page(rx_ring, rx_info,
+ __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
+ if (unlikely(rc < 0)) {
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "failed to alloc buffer for rx queue %d\n",
+ rx_ring->qid);
+ break;
+ }
+ rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
+ &rx_info->ena_buf,
+ next_to_use);
+ if (unlikely(rc)) {
+ netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "failed to add buffer for rx queue %d\n",
+ rx_ring->qid);
+ break;
+ }
+ next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
+ rx_ring->ring_size);
+ }
+
+ if (unlikely(i < num)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.refil_partial++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netdev_warn(rx_ring->netdev,
+ "refilled rx qid %d with only %d buffers (from %d)\n",
+ rx_ring->qid, i, num);
+ }
+
+ if (likely(i)) {
+ /* Add a memory barrier to make sure the descriptors are written
+ * before issuing the doorbell
+ */
+ wmb();
+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
+ }
+
+ rx_ring->next_to_use = next_to_use;
+
+ return i;
+}
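+
+/* The refill loop above follows the usual producer pattern for a ring
+ * shared with the device; a minimal sketch of the ordering it relies on:
+ *
+ *   fill descriptor slots        (ena_com_add_single_rx_desc)
+ *   wmb()                        ensure descriptors are visible first
+ *   ring the doorbell            (ena_com_write_sq_doorbell)
+ *
+ * Without the barrier the device could observe the doorbell before the
+ * descriptor contents and fetch stale entries.
+ */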
+
+static void ena_free_rx_bufs(struct ena_adapter *adapter,
+ u32 qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+ u32 i;
+
+ for (i = 0; i < rx_ring->ring_size; i++) {
+ struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
+
+ if (rx_info->page)
+ ena_free_rx_page(rx_ring, rx_info);
+ }
+}
+
+/* ena_refill_all_rx_bufs - allocate Rx buffers for all queues
+ * @adapter: board private structure
+ */
+static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
+{
+ struct ena_ring *rx_ring;
+ int i, rc, bufs_num;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ bufs_num = rx_ring->ring_size - 1;
+ rc = ena_refill_rx_bufs(rx_ring, bufs_num);
+
+ if (unlikely(rc != bufs_num))
+ netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "refilling Queue %d failed. allocated %d buffers from: %d\n",
+ i, rc, bufs_num);
+ }
+}
+
+static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_rx_bufs(adapter, i);
+}
+
+/* ena_free_tx_bufs - Free Tx Buffers per Queue
+ * @tx_ring: TX ring whose buffers are to be freed
+ */
+static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+{
+ u32 i;
+
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+ struct ena_com_buf *ena_buf;
+ int nr_frags;
+ int j;
+
+ if (!tx_info->skb)
+ continue;
+
+ netdev_notice(tx_ring->netdev,
+ "free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
+
+ ena_buf = tx_info->bufs;
+ dma_unmap_single(tx_ring->dev,
+ ena_buf->paddr,
+ ena_buf->len,
+ DMA_TO_DEVICE);
+
+ /* unmap remaining mapped pages */
+ nr_frags = tx_info->num_of_bufs - 1;
+ for (j = 0; j < nr_frags; j++) {
+ ena_buf++;
+ dma_unmap_page(tx_ring->dev,
+ ena_buf->paddr,
+ ena_buf->len,
+ DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(tx_info->skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
+}
+
+static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
+{
+ struct ena_ring *tx_ring;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+ ena_free_tx_bufs(tx_ring);
+ }
+}
+
+static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
+{
+ u16 ena_qid;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_TXQ_IDX(i);
+ ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
+ }
+}
+
+static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
+{
+ u16 ena_qid;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_RXQ_IDX(i);
+ ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
+ }
+}
+
+static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
+{
+ ena_destroy_all_tx_queues(adapter);
+ ena_destroy_all_rx_queues(adapter);
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->skb))
+ return 0;
+ }
+
+ if (tx_info)
+ netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_info doesn't have valid skb\n");
+ else
+ netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "Invalid req_id: %hu\n", req_id);
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.bad_req_id++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ /* Trigger device reset */
+ set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
+ return -EFAULT;
+}
+
+static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+{
+ struct netdev_queue *txq;
+ bool above_thresh;
+ u32 tx_bytes = 0;
+ u32 total_done = 0;
+ u16 next_to_clean;
+ u16 req_id;
+ int tx_pkts = 0;
+ int rc;
+
+ next_to_clean = tx_ring->next_to_clean;
+ txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct sk_buff *skb;
+ struct ena_com_buf *ena_buf;
+ int i, nr_frags;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
+ &req_id);
+ if (rc)
+ break;
+
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ skb = tx_info->skb;
+
+ /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
+ prefetch(&skb->end);
+
+ tx_info->skb = NULL;
+ tx_info->last_jiffies = 0;
+
+ if (likely(tx_info->num_of_bufs != 0)) {
+ ena_buf = tx_info->bufs;
+
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+
+ /* unmap remaining mapped pages */
+ nr_frags = tx_info->num_of_bufs - 1;
+ for (i = 0; i < nr_frags; i++) {
+ ena_buf++;
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+ }
+ }
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d skb %p completed\n", tx_ring->qid,
+ skb);
+
+ tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+
+ tx_ring->free_tx_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+ }
+
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
+
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ tx_ring->qid, tx_pkts);
+
+ /* Make the ring's circular update visible to
+ * ena_start_xmit() before checking netif_queue_stopped().
+ */
+ smp_mb();
+
+ above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
+ ENA_TX_WAKEUP_THRESH;
+ if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
+ ENA_TX_WAKEUP_THRESH;
+ if (netif_tx_queue_stopped(txq) && above_thresh) {
+ netif_tx_wake_queue(txq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_wakeup++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+ __netif_tx_unlock(txq);
+ }
+
+ tx_ring->per_napi_bytes += tx_bytes;
+ tx_ring->per_napi_packets += tx_pkts;
+
+ return tx_pkts;
+}
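+
+/* A sketch of the stop/wake handshake between ena_start_xmit() and the
+ * completion path above; both sides re-check the condition under a
+ * barrier so a wakeup cannot be lost:
+ *
+ *   ena_start_xmit():                 ena_clean_tx_irq():
+ *     stop queue                        advance next_to_clean
+ *     smp_rmb()                         smp_mb()
+ *     if (space > THRESH) wake          if (stopped && space > THRESH)
+ *                                           lock txq, re-check, wake
+ */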
+
+static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ struct ena_com_rx_buf_info *ena_bufs,
+ u32 descs,
+ u16 *next_to_clean)
+{
+ struct sk_buff *skb;
+ struct ena_rx_buffer *rx_info =
+ &rx_ring->rx_buffer_info[*next_to_clean];
+ u32 len;
+ u32 buf = 0;
+ void *va;
+
+ len = ena_bufs[0].len;
+ if (unlikely(!rx_info->page)) {
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Page is NULL\n");
+ return NULL;
+ }
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx_info %p page %p\n",
+ rx_info, rx_info->page);
+
+ /* save virt address of first buffer */
+ va = page_address(rx_info->page) + rx_info->page_offset;
+ prefetch(va + NET_IP_ALIGN);
+
+ if (len <= rx_ring->rx_copybreak) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_copybreak);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.skb_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Failed to allocate skb\n");
+ return NULL;
+ }
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx allocated small packet. len %d. data_len %d\n",
+ skb->len, skb->data_len);
+
+ /* sync this buffer for CPU use */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr),
+ len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, va, len);
+ dma_sync_single_for_device(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr),
+ len,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
+ rx_ring->ring_size);
+ return skb;
+ }
+
+ skb = napi_get_frags(rx_ring->napi);
+ if (unlikely(!skb)) {
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "Failed allocating skb\n");
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.skb_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ return NULL;
+ }
+
+ do {
+ dma_unmap_page(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ rx_info->page_offset, len, PAGE_SIZE);
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx skb updated. len %d. data_len %d\n",
+ skb->len, skb->data_len);
+
+ rx_info->page = NULL;
+ *next_to_clean =
+ ENA_RX_RING_IDX_NEXT(*next_to_clean,
+ rx_ring->ring_size);
+ if (likely(--descs == 0))
+ break;
+ rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
+ len = ena_bufs[++buf].len;
+ } while (1);
+
+ return skb;
+}
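+
+/* Example of the copybreak decision above, assuming rx_copybreak == 256:
+ * a 128-byte packet is copied into a freshly allocated linear skb and the
+ * backing page stays on the ring for reuse, while a 1500-byte packet is
+ * attached page-by-page as skb frags via napi_get_frags() with no copy.
+ */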
+
+/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @rx_ring: the ring on which the packet was received
+ * @ena_rx_ctx: received packet context/metadata
+ * @skb: skb currently being received and modified
+ */
+static inline void ena_rx_checksum(struct ena_ring *rx_ring,
+ struct ena_com_rx_ctx *ena_rx_ctx,
+ struct sk_buff *skb)
+{
+ /* Rx csum disabled */
+ if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* For fragmented packets the checksum isn't valid */
+ if (ena_rx_ctx->frag) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* if IP and error */
+ if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
+ (ena_rx_ctx->l3_csum_err))) {
+ /* ipv4 checksum error */
+ skb->ip_summed = CHECKSUM_NONE;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_csum++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "RX IPv4 header checksum error\n");
+ return;
+ }
+
+ /* if TCP/UDP */
+ if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
+ (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
+ if (unlikely(ena_rx_ctx->l4_csum_err)) {
+ /* TCP/UDP checksum error */
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_csum++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "RX L4 checksum error\n");
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static void ena_set_rx_hash(struct ena_ring *rx_ring,
+ struct ena_com_rx_ctx *ena_rx_ctx,
+ struct sk_buff *skb)
+{
+ enum pkt_hash_types hash_type;
+
+ if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
+ if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
+ (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
+ hash_type = PKT_HASH_TYPE_L4;
+ else
+ hash_type = PKT_HASH_TYPE_NONE;
+
+ /* Override hash type if the packet is fragmented */
+ if (ena_rx_ctx->frag)
+ hash_type = PKT_HASH_TYPE_NONE;
+
+ skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
+ }
+}
+
+/* ena_clean_rx_irq - Cleanup RX irq
+ * @rx_ring: RX ring to clean
+ * @napi: napi handler
+ * @budget: how many packets driver is allowed to clean
+ *
+ * Returns the number of cleaned buffers.
+ */
+static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ u32 budget)
+{
+ u16 next_to_clean = rx_ring->next_to_clean;
+ u32 res_budget, work_done;
+
+ struct ena_com_rx_ctx ena_rx_ctx;
+ struct ena_adapter *adapter;
+ struct sk_buff *skb;
+ int refill_required;
+ int refill_threshold;
+ int rc = 0;
+ int total_len = 0;
+ int rx_copybreak_pkt = 0;
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "%s qid %d\n", __func__, rx_ring->qid);
+ res_budget = budget;
+
+ do {
+ ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
+ ena_rx_ctx.max_bufs = rx_ring->sgl_size;
+ ena_rx_ctx.descs = 0;
+ rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
+ rx_ring->ena_com_io_sq,
+ &ena_rx_ctx);
+ if (unlikely(rc))
+ goto error;
+
+ if (unlikely(ena_rx_ctx.descs == 0))
+ break;
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+ rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
+ ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+
+ /* allocate skb and fill it */
+ skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
+ &next_to_clean);
+
+ /* exit if we failed to retrieve a buffer */
+ if (unlikely(!skb)) {
+ next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
+ ena_rx_ctx.descs,
+ rx_ring->ring_size);
+ break;
+ }
+
+ ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
+
+ ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
+
+ skb_record_rx_queue(skb, rx_ring->qid);
+
+ if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
+ total_len += rx_ring->ena_bufs[0].len;
+ rx_copybreak_pkt++;
+ napi_gro_receive(napi, skb);
+ } else {
+ total_len += skb->len;
+ napi_gro_frags(napi);
+ }
+
+ res_budget--;
+ } while (likely(res_budget));
+
+ work_done = budget - res_budget;
+ rx_ring->per_napi_bytes += total_len;
+ rx_ring->per_napi_packets += work_done;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bytes += total_len;
+ rx_ring->rx_stats.cnt += work_done;
+ rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ rx_ring->next_to_clean = next_to_clean;
+
+ refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
+
+ /* Optimization, try to batch new rx buffers */
+ if (refill_required > refill_threshold) {
+ ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ ena_refill_rx_bufs(rx_ring, refill_required);
+ }
+
+ return work_done;
+
+error:
+ adapter = netdev_priv(rx_ring->netdev);
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_desc_num++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ /* Too many desc from the device. Trigger reset */
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
+ return 0;
+}
+
+inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
+ struct ena_ring *tx_ring)
+{
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
+ rx_ring->per_napi_packets,
+ rx_ring->per_napi_bytes,
+ &rx_ring->smoothed_interval,
+ &rx_ring->moder_tbl_idx);
+
+ /* Reset per napi packets/bytes */
+ tx_ring->per_napi_packets = 0;
+ tx_ring->per_napi_bytes = 0;
+ rx_ring->per_napi_packets = 0;
+ rx_ring->per_napi_bytes = 0;
+}
+
+static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
+{
+ int cpu = get_cpu();
+ int numa_node;
+
+ /* Check only one ring since both rings run on the same CPU */
+ if (likely(tx_ring->cpu == cpu))
+ goto out;
+
+ numa_node = cpu_to_node(cpu);
+ put_cpu();
+
+ if (numa_node != NUMA_NO_NODE) {
+ ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
+ }
+
+ tx_ring->cpu = cpu;
+ rx_ring->cpu = cpu;
+
+ return;
+out:
+ put_cpu();
+}
+
+static int ena_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ struct ena_ring *tx_ring, *rx_ring;
+ struct ena_eth_io_intr_reg intr_reg;
+
+ u32 tx_work_done;
+ u32 rx_work_done;
+ int tx_budget;
+ int napi_comp_call = 0;
+ int ret;
+
+ tx_ring = ena_napi->tx_ring;
+ rx_ring = ena_napi->rx_ring;
+
+ tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+
+ if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
+ napi_complete_done(napi, rx_work_done);
+
+ napi_comp_call = 1;
+ /* Tx and Rx share the same interrupt vector */
+ if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
+ ena_adjust_intr_moderation(rx_ring, tx_ring);
+
+ /* Update intr register: rx intr delay, tx intr delay and
+ * interrupt unmask
+ */
+ ena_com_update_intr_reg(&intr_reg,
+ rx_ring->smoothed_interval,
+ tx_ring->smoothed_interval,
+ true);
+
+ /* It is a shared MSI-X vector. The Tx and Rx CQs both point to
+ * it, so we use either one to reach the interrupt register.
+ */
+ ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+
+ ena_update_ring_numa_node(tx_ring, rx_ring);
+
+ ret = rx_work_done;
+ } else {
+ ret = budget;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.napi_comp += napi_comp_call;
+ tx_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ return ret;
+}
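+
+/* ena_io_poll() above follows the standard NAPI contract: returning less
+ * than @budget after napi_complete_done() re-arms the device interrupt
+ * via ena_com_unmask_intr(), while returning the full budget keeps the
+ * context in polling mode and napi is rescheduled without unmasking.
+ */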
+
+static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+
+ ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
+
+ /* Don't call the aenq handler before probe is done */
+ if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
+ ena_com_aenq_intr_handler(adapter->ena_dev, data);
+
+ return IRQ_HANDLED;
+}
+
+/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
+ * @irq: interrupt number
+ * @data: pointer to a network interface private napi device structure
+ */
+static irqreturn_t ena_intr_msix_io(int irq, void *data)
+{
+ struct ena_napi *ena_napi = data;
+
+ napi_schedule(&ena_napi->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+{
+ int i, msix_vecs, rc;
+
+ if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Error, MSI-X is already enabled\n");
+ return -EPERM;
+ }
+
+ /* Reserve the maximum number of MSI-X vectors we might need */
+ msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+
+ netif_dbg(adapter, probe, adapter->netdev,
+ "trying to enable MSI-X, vectors %d\n", msix_vecs);
+
+ adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));
+
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < msix_vecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
+ if (rc != 0) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Failed to enable MSI-X, vectors %d rc %d\n",
+ msix_vecs, rc);
+ return -ENOSPC;
+ }
+
+ netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
+ msix_vecs);
+
+ if (msix_vecs >= 1) {
+ if (ena_init_rx_cpu_rmap(adapter))
+ netif_warn(adapter, probe, adapter->netdev,
+ "Failed to map IRQs to CPUs\n");
+ }
+
+ adapter->msix_vecs = msix_vecs;
+ set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
+
+ return 0;
+}
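+
+/* Vector layout assumed by the IRQ table below: one management vector
+ * (ENA_MGMNT_IRQ_IDX) serving admin and AENQ completions, followed by
+ * one vector per IO queue pair (ENA_IO_IRQ_IDX(i)); the Tx and Rx rings
+ * of queue i share that vector and a single napi context.
+ */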
+
+static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
+{
+ u32 cpu;
+
+ snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
+ ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
+ pci_name(adapter->pdev));
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
+ ena_intr_msix_mgmnt;
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
+ adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
+ cpu = cpumask_first(cpu_online_mask);
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
+ cpumask_set_cpu(cpu,
+ &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
+}
+
+static void ena_setup_io_intr(struct ena_adapter *adapter)
+{
+ struct net_device *netdev;
+ int irq_idx, i, cpu;
+
+ netdev = adapter->netdev;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ irq_idx = ENA_IO_IRQ_IDX(i);
+ cpu = i % num_online_cpus();
+
+ snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
+ "%s-Tx-Rx-%d", netdev->name, i);
+ adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
+ adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
+ adapter->irq_tbl[irq_idx].vector =
+ adapter->msix_entries[irq_idx].vector;
+ adapter->irq_tbl[irq_idx].cpu = cpu;
+
+ cpumask_set_cpu(cpu,
+ &adapter->irq_tbl[irq_idx].affinity_hint_mask);
+ }
+}
+
+static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
+{
+ unsigned long flags = 0;
+ struct ena_irq *irq;
+ int rc;
+
+ irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ irq->data);
+ if (rc) {
+ netif_err(adapter, probe, adapter->netdev,
+ "failed to request admin irq\n");
+ return rc;
+ }
+
+ netif_dbg(adapter, probe, adapter->netdev,
+ "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
+ irq->affinity_hint_mask.bits[0], irq->vector);
+
+ irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+
+ return rc;
+}
+
+static int ena_request_io_irq(struct ena_adapter *adapter)
+{
+ unsigned long flags = 0;
+ struct ena_irq *irq;
+ int rc = 0, i, k;
+
+ if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to request I/O IRQ: MSI-X is not enabled\n");
+ return -EINVAL;
+ }
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+ irq = &adapter->irq_tbl[i];
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ irq->data);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to request I/O IRQ. index %d rc %d\n",
+ i, rc);
+ goto err;
+ }
+
+ netif_dbg(adapter, ifup, adapter->netdev,
+ "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+ i, irq->affinity_hint_mask.bits[0], irq->vector);
+
+ irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+ }
+
+ return rc;
+
+err:
+ for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
+ irq = &adapter->irq_tbl[k];
+ free_irq(irq->vector, irq->data);
+ }
+
+ return rc;
+}
+
+static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+
+ irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
+ synchronize_irq(irq->vector);
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_irq(irq->vector, irq->data);
+}
+
+static void ena_free_io_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+ int i;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (adapter->msix_vecs >= 1) {
+ free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
+ adapter->netdev->rx_cpu_rmap = NULL;
+ }
+#endif /* CONFIG_RFS_ACCEL */
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+ irq = &adapter->irq_tbl[i];
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_irq(irq->vector, irq->data);
+ }
+}
+
+static void ena_disable_msix(struct ena_adapter *adapter)
+{
+ if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
+ pci_disable_msix(adapter->pdev);
+
+ vfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
+{
+ int i;
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
+ synchronize_irq(adapter->irq_tbl[i].vector);
+}
+
+static void ena_del_napi(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ netif_napi_del(&adapter->ena_napi[i].napi);
+}
+
+static void ena_init_napi(struct ena_adapter *adapter)
+{
+ struct ena_napi *napi;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ napi = &adapter->ena_napi[i];
+
+ netif_napi_add(adapter->netdev,
+ &adapter->ena_napi[i].napi,
+ ena_io_poll,
+ ENA_NAPI_BUDGET);
+ napi->rx_ring = &adapter->rx_ring[i];
+ napi->tx_ring = &adapter->tx_ring[i];
+ napi->qid = i;
+ }
+}
+
+static void ena_napi_disable_all(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_disable(&adapter->ena_napi[i].napi);
+}
+
+static void ena_napi_enable_all(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_enable(&adapter->ena_napi[i].napi);
+}
+
+static void ena_restore_ethtool_params(struct ena_adapter *adapter)
+{
+ adapter->tx_usecs = 0;
+ adapter->rx_usecs = 0;
+ adapter->tx_frames = 1;
+ adapter->rx_frames = 1;
+}
+
+/* Configure the Rx forwarding (RSS) */
+static int ena_rss_configure(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc;
+
+ /* In case the RSS table wasn't initialized by probe */
+ if (!ena_dev->rss.tbl_log_size) {
+ rc = ena_rss_init_default(adapter);
+ if (rc && (rc != -EPERM)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to init RSS rc: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Set indirect table */
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != -EPERM))
+ return rc;
+
+ /* Configure hash function (if supported) */
+ rc = ena_com_set_hash_function(ena_dev);
+ if (unlikely(rc && (rc != -EPERM)))
+ return rc;
+
+ /* Configure hash inputs (if supported) */
+ rc = ena_com_set_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != -EPERM)))
+ return rc;
+
+ return 0;
+}
+
+static int ena_up_complete(struct ena_adapter *adapter)
+{
+ int rc, i;
+
+ rc = ena_rss_configure(adapter);
+ if (rc)
+ return rc;
+
+ ena_init_napi(adapter);
+
+ ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
+
+ ena_refill_all_rx_bufs(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(adapter->netdev);
+
+ ena_restore_ethtool_params(adapter);
+
+ ena_napi_enable_all(adapter);
+
+ /* schedule napi in case we had pending packets
+ * from the last time napi was disabled
+ */
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+
+ return 0;
+}
+
+static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
+{
+ struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_dev *ena_dev;
+ struct ena_ring *tx_ring;
+ u32 msix_vector;
+ u16 ena_qid;
+ int rc;
+
+ ena_dev = adapter->ena_dev;
+
+ tx_ring = &adapter->tx_ring[qid];
+ msix_vector = ENA_IO_IRQ_IDX(qid);
+ ena_qid = ENA_IO_TXQ_IDX(qid);
+
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+ ctx.qid = ena_qid;
+ ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+ ctx.msix_vector = msix_vector;
+ ctx.queue_size = adapter->tx_ring_size;
+ ctx.numa_node = cpu_to_node(tx_ring->cpu);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to create I/O TX queue num %d rc: %d\n",
+ qid, rc);
+ return rc;
+ }
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &tx_ring->ena_com_io_sq,
+ &tx_ring->ena_com_io_cq);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
+ qid, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ }
+
+ ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
+ return rc;
+}
+
+static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc, i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_create_io_tx_queue(adapter, i);
+ if (rc)
+ goto create_err;
+ }
+
+ return 0;
+
+create_err:
+ while (i--)
+ ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
+
+ return rc;
+}
+
+static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
+{
+ struct ena_com_dev *ena_dev;
+ struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_ring *rx_ring;
+ u32 msix_vector;
+ u16 ena_qid;
+ int rc;
+
+ ena_dev = adapter->ena_dev;
+
+ rx_ring = &adapter->rx_ring[qid];
+ msix_vector = ENA_IO_IRQ_IDX(qid);
+ ena_qid = ENA_IO_RXQ_IDX(qid);
+
+ ctx.qid = ena_qid;
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+ ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ ctx.msix_vector = msix_vector;
+ ctx.queue_size = adapter->rx_ring_size;
+ ctx.numa_node = cpu_to_node(rx_ring->cpu);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to create I/O RX queue num %d rc: %d\n",
+ qid, rc);
+ return rc;
+ }
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &rx_ring->ena_com_io_sq,
+ &rx_ring->ena_com_io_cq);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
+ qid, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ }
+
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
+
+ return rc;
+}
+
+static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc, i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_create_io_rx_queue(adapter, i);
+ if (rc)
+ goto create_err;
+ }
+
+ return 0;
+
+create_err:
+ while (i--)
+ ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
+
+ return rc;
+}
+
+static int ena_up(struct ena_adapter *adapter)
+{
+ int rc;
+
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+ ena_setup_io_intr(adapter);
+
+ rc = ena_request_io_irq(adapter);
+ if (rc)
+ goto err_req_irq;
+
+ /* allocate transmit descriptors */
+ rc = ena_setup_all_tx_resources(adapter);
+ if (rc)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ rc = ena_setup_all_rx_resources(adapter);
+ if (rc)
+ goto err_setup_rx;
+
+ /* Create TX queues */
+ rc = ena_create_all_io_tx_queues(adapter);
+ if (rc)
+ goto err_create_tx_queues;
+
+ /* Create RX queues */
+ rc = ena_create_all_io_rx_queues(adapter);
+ if (rc)
+ goto err_create_rx_queues;
+
+ rc = ena_up_complete(adapter);
+ if (rc)
+ goto err_up;
+
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.interface_up++;
+ u64_stats_update_end(&adapter->syncp);
+
+ set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ return rc;
+
+err_up:
+ ena_destroy_all_rx_queues(adapter);
+err_create_rx_queues:
+ ena_destroy_all_tx_queues(adapter);
+err_create_tx_queues:
+ ena_free_all_io_rx_resources(adapter);
+err_setup_rx:
+ ena_free_all_io_tx_resources(adapter);
+err_setup_tx:
+ ena_free_io_irq(adapter);
+err_req_irq:
+
+ return rc;
+}
+
+static void ena_down(struct ena_adapter *adapter)
+{
+ netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+
+ clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.interface_down++;
+ u64_stats_update_end(&adapter->syncp);
+
+ /* After this point the napi handler won't enable the tx queue */
+ ena_napi_disable_all(adapter);
+ netif_carrier_off(adapter->netdev);
+ netif_tx_disable(adapter->netdev);
+
+ /* After the queues are destroyed there won't be any new interrupts */
+ ena_destroy_all_io_queues(adapter);
+
+ ena_disable_io_intr_sync(adapter);
+ ena_free_io_irq(adapter);
+ ena_del_napi(adapter);
+
+ ena_free_all_tx_bufs(adapter);
+ ena_free_all_rx_bufs(adapter);
+ ena_free_all_io_tx_resources(adapter);
+ ena_free_all_io_rx_resources(adapter);
+}
+
+/* ena_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int ena_open(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ /* Notify the stack of the actual queue counts. */
+ rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
+ if (rc) {
+ netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
+ return rc;
+ }
+
+ rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
+ if (rc) {
+ netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
+ return rc;
+ }
+
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/* ena_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int ena_close(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
+
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
+
+ return 0;
+}
+
+static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
+{
+ u32 mss = skb_shinfo(skb)->gso_size;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+ u8 l4_protocol = 0;
+
+ if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
+ ena_tx_ctx->l4_csum_enable = 1;
+ if (mss) {
+ ena_tx_ctx->tso_enable = 1;
+ ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
+ ena_tx_ctx->l4_csum_partial = 0;
+ } else {
+ ena_tx_ctx->tso_enable = 0;
+ ena_meta->l4_hdr_len = 0;
+ ena_tx_ctx->l4_csum_partial = 1;
+ }
+
+ switch (ip_hdr(skb)->version) {
+ case IPVERSION:
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
+ if (ip_hdr(skb)->frag_off & htons(IP_DF))
+ ena_tx_ctx->df = 1;
+ if (mss)
+ ena_tx_ctx->l3_csum_enable = 1;
+ l4_protocol = ip_hdr(skb)->protocol;
+ break;
+ case 6:
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
+ l4_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ if (l4_protocol == IPPROTO_TCP)
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
+ else
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
+
+ ena_meta->mss = mss;
+ ena_meta->l3_hdr_len = skb_network_header_len(skb);
+ ena_meta->l3_hdr_offset = skb_network_offset(skb);
+ ena_tx_ctx->meta_valid = 1;
+
+ } else {
+ ena_tx_ctx->meta_valid = 0;
+ }
+}
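+
+/* A worked example for ena_tx_csum() above: a TCP/IPv4 skb with
+ * ip_summed == CHECKSUM_PARTIAL and gso_size == 1448 produces
+ * tso_enable = 1, l4_csum_enable = 1, l3_csum_enable = 1,
+ * l3_proto = IPV4, l4_proto = TCP and mss = 1448; the same skb without
+ * GSO instead sets tso_enable = 0 and l4_csum_partial = 1.
+ */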
+
+static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ int num_frags, header_len, rc;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ header_len = skb_headlen(skb);
+
+ if (num_frags < tx_ring->sgl_size)
+ return 0;
+
+ if ((num_frags == tx_ring->sgl_size) &&
+ (header_len < tx_ring->tx_max_header_size))
+ return 0;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.linearize++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ rc = skb_linearize(skb);
+ if (unlikely(rc)) {
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.linearize_failed++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+
+ return rc;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_ring *tx_ring;
+ struct netdev_queue *txq;
+ struct ena_com_buf *ena_buf;
+ void *push_hdr;
+ u32 len, last_frag;
+ u16 next_to_use;
+ u16 req_id;
+ u16 push_len;
+ u16 header_len;
+ dma_addr_t dma;
+ int qid, rc, nb_hw_desc;
+ int i = -1;
+
+ netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+ /* Determine which tx ring we will be placed on */
+ qid = skb_get_queue_mapping(skb);
+ tx_ring = &adapter->tx_ring[qid];
+ txq = netdev_get_tx_queue(dev, qid);
+
+ rc = ena_check_and_linearize_skb(tx_ring, skb);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ skb_tx_timestamp(skb);
+ len = skb_headlen(skb);
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+ ena_buf = tx_info->bufs;
+ tx_info->skb = skb;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* prepare the push buffer */
+ push_len = min_t(u32, len, tx_ring->tx_max_header_size);
+ header_len = push_len;
+ push_hdr = skb->data;
+ } else {
+ push_len = 0;
+ header_len = min_t(u32, len, tx_ring->tx_max_header_size);
+ push_hdr = NULL;
+ }
+
+ netif_dbg(adapter, tx_queued, dev,
+ "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
+ push_hdr, push_len);
+
+ if (len > push_len) {
+ dma = dma_map_single(tx_ring->dev, skb->data + push_len,
+ len - push_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto error_report_dma_error;
+
+ ena_buf->paddr = dma;
+ ena_buf->len = len - push_len;
+
+ ena_buf++;
+ tx_info->num_of_bufs++;
+ }
+
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < last_frag; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto error_report_dma_error;
+
+ ena_buf->paddr = dma;
+ ena_buf->len = len;
+ ena_buf++;
+ }
+
+ tx_info->num_of_bufs += last_frag;
+
+ memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_hdr;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = header_len;
+
+ /* set flags and meta data */
+ ena_tx_csum(&ena_tx_ctx, skb);
+
+ /* prepare the packet's descriptors to dma engine */
+ rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
+ &nb_hw_desc);
+
+ if (unlikely(rc)) {
+ netif_err(adapter, tx_queued, dev,
+ "failed to prepare tx bufs\n");
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_stop++;
+ tx_ring->tx_stats.prepare_ctx_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netif_tx_stop_queue(txq);
+ goto error_unmap_dma;
+ }
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.cnt++;
+ tx_ring->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ tx_info->tx_descs = nb_hw_desc;
+ tx_info->last_jiffies = jiffies;
+
+ tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+ tx_ring->ring_size);
+
+ /* This wmb() serves two purposes:
+ * 1 - perform an smp barrier before reading next_to_completion
+ * 2 - make sure the descriptors are written before triggering
+ * the doorbell
+ */
+ wmb();
+
+ /* Stop the queue when no more space is available; a packet can
+ * need up to sgl_size + 2 descriptors: one for the meta descriptor
+ * and one for the header (if the header is larger than
+ * tx_max_header_size).
+ */
+ if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
+ (tx_ring->sgl_size + 2))) {
+ netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
+ __func__, qid);
+
+ netif_tx_stop_queue(txq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_stop++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ /* There is a rare race where this function decides to
+ * stop the queue while, in the meantime, clean_tx_irq updates
+ * next_to_completion and terminates; the queue would then
+ * remain stopped forever.
+ * To close the race, perform an rmb, re-check the wakeup
+ * condition and wake up the queue if needed.
+ */
+ smp_rmb();
+
+ if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
+ ENA_TX_WAKEUP_THRESH) {
+ netif_tx_wake_queue(txq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_wakeup++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+ }
+
+ if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+ /* trigger the dma engine */
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.doorbells++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+
+ return NETDEV_TX_OK;
+
+error_report_dma_error:
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map skb\n");
+
+ tx_info->skb = NULL;
+
+error_unmap_dma:
+ if (i >= 0) {
+ /* save value of frag that failed */
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ tx_info->skb = NULL;
+ ena_buf = tx_info->bufs;
+ dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ ena_buf++;
+ dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+ }
+ }
+
+error_drop_packet:
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
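+
+/* Doorbell batching: when the stack indicates more packets are coming
+ * (skb->xmit_more) the doorbell write in ena_start_xmit() is deferred to
+ * the last skb of the burst, saving one MMIO write per packet; a stopped
+ * queue forces an immediate doorbell so pending descriptors are flushed.
+ */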
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ena_netpoll(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ u16 qid;
+ /* we suspect that this is good for in--kernel network services that
+ * want to loop incoming skb rx to tx in normal user generated traffic,
+ * most probably we will not get to this
+ */
+ if (skb_rx_queue_recorded(skb))
+ qid = skb_get_rx_queue(skb);
+ else
+ qid = fallback(dev, skb);
+
+ return qid;
+}
+
+static void ena_config_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_host_info *host_info;
+ int rc;
+
+ /* Allocate only the host info */
+ rc = ena_com_allocate_host_info(ena_dev);
+ if (rc) {
+ pr_err("Cannot allocate host info\n");
+ return;
+ }
+
+ host_info = ena_dev->host_attr.host_info;
+
+ host_info->os_type = ENA_ADMIN_OS_LINUX;
+ host_info->kernel_ver = LINUX_VERSION_CODE;
+ strncpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str) - 1);
+ host_info->os_dist = 0;
+ strncpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str) - 1);
+ host_info->driver_version =
+ (DRV_MODULE_VER_MAJOR) |
+ (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+ (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+
+ rc = ena_com_set_host_attributes(ena_dev);
+ if (rc) {
+ if (rc == -EPERM)
+ pr_warn("Cannot set host attributes\n");
+ else
+ pr_err("Cannot set host attributes\n");
+
+ goto err;
+ }
+
+ return;
+
+err:
+ ena_com_delete_host_info(ena_dev);
+}
+
+static void ena_config_debug_area(struct ena_adapter *adapter)
+{
+ u32 debug_area_size;
+ int rc, ss_count;
+
+ ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
+ if (ss_count <= 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "SS count is negative\n");
+ return;
+ }
+
+ /* allocate 32 bytes for each stat string and 64 bits for each stat value */
+ debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
+
+ rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
+ if (rc) {
+ pr_err("Cannot allocate debug area\n");
+ return;
+ }
+
+ rc = ena_com_set_host_attributes(adapter->ena_dev);
+ if (rc) {
+ if (rc == -EPERM)
+ netif_warn(adapter, drv, adapter->netdev,
+ "Cannot set host attributes\n");
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set host attributes\n");
+ goto err;
+ }
+
+ return;
+err:
+ ena_com_delete_debug_area(adapter->ena_dev);
+}
+
+static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_admin_basic_stats ena_stats;
+ int rc;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return NULL;
+
+ rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
+ if (rc)
+ return NULL;
+
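+ /* The device reports each 64-bit counter as two 32-bit halves;
+ * stitch them back together here.
+ */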
+ stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
+ ena_stats.tx_bytes_low;
+ stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) |
+ ena_stats.rx_bytes_low;
+
+ stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) |
+ ena_stats.rx_pkts_low;
+ stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) |
+ ena_stats.tx_pkts_low;
+
+ stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) |
+ ena_stats.rx_drops_low;
+
+ stats->multicast = 0;
+ stats->collisions = 0;
+
+ stats->rx_length_errors = 0;
+ stats->rx_crc_errors = 0;
+ stats->rx_frame_errors = 0;
+ stats->rx_fifo_errors = 0;
+ stats->rx_missed_errors = 0;
+ stats->tx_window_errors = 0;
+
+ stats->rx_errors = 0;
+ stats->tx_errors = 0;
+
+ return stats;
+}
+
+static const struct net_device_ops ena_netdev_ops = {
+ .ndo_open = ena_open,
+ .ndo_stop = ena_close,
+ .ndo_start_xmit = ena_start_xmit,
+ .ndo_select_queue = ena_select_queue,
+ .ndo_get_stats64 = ena_get_stats64,
+ .ndo_tx_timeout = ena_tx_timeout,
+ .ndo_change_mtu = ena_change_mtu,
+ .ndo_set_mac_address = NULL,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ena_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+};
+
+static void ena_device_io_suspend(struct work_struct *work)
+{
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, suspend_io_task);
+ struct net_device *netdev = adapter->netdev;
+
+ /* ena_napi_disable_all disables only the IO handling.
+ * We are still subject to AENQ keep alive watchdog.
+ */
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.io_suspend++;
+ u64_stats_update_end(&adapter->syncp);
+ ena_napi_disable_all(adapter);
+ netif_tx_lock(netdev);
+ netif_device_detach(netdev);
+ netif_tx_unlock(netdev);
+}
+
+static void ena_device_io_resume(struct work_struct *work)
+{
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, resume_io_task);
+ struct net_device *netdev = adapter->netdev;
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.io_resume++;
+ u64_stats_update_end(&adapter->syncp);
+
+ netif_device_attach(netdev);
+ ena_napi_enable_all(adapter);
+}
+
+static int ena_device_validate_params(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct net_device *netdev = adapter->netdev;
+ int rc;
+
+ rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
+ adapter->mac_addr);
+ if (!rc) {
+ netif_err(adapter, drv, netdev,
+ "Error, mac address are different\n");
+ return -EINVAL;
+ }
+
+ if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
+ (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
+ netif_err(adapter, drv, netdev,
+ "Error, device doesn't support enough queues\n");
+ return -EINVAL;
+ }
+
+ if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
+ netif_err(adapter, drv, netdev,
+ "Error, device max mtu is smaller than netdev MTU\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state)
+{
+ struct device *dev = &pdev->dev;
+ bool readless_supported;
+ u32 aenq_groups;
+ int dma_width;
+ int rc;
+
+ rc = ena_com_mmio_reg_read_request_init(ena_dev);
+ if (rc) {
+ dev_err(dev, "failed to init mmio read less\n");
+ return rc;
+ }
+
+ /* The PCIe configuration space revision id indicates whether mmio
+ * reg read is disabled
+ */
+ readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
+ ena_com_set_mmio_read_mode(ena_dev, readless_supported);
+
+ rc = ena_com_dev_reset(ena_dev);
+ if (rc) {
+ dev_err(dev, "Can not reset device\n");
+ goto err_mmio_read_less;
+ }
+
+ rc = ena_com_validate_version(ena_dev);
+ if (rc) {
+ dev_err(dev, "device version is too low\n");
+ goto err_mmio_read_less;
+ }
+
+ dma_width = ena_com_get_dma_width(ena_dev);
+ if (dma_width < 0) {
+ dev_err(dev, "Invalid dma width value %d", dma_width);
+ rc = dma_width;
+ goto err_mmio_read_less;
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ if (rc) {
+ dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
+ goto err_mmio_read_less;
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ if (rc) {
+ dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
+ rc);
+ goto err_mmio_read_less;
+ }
+
+ /* ENA admin level init */
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ if (rc) {
+ dev_err(dev,
+ "Can not initialize ena admin queue with device\n");
+ goto err_mmio_read_less;
+ }
+
+ /* To enable the msix interrupts the driver needs to know the number
+ * of queues. So the driver uses polling mode to retrieve this
+ * information
+ */
+ ena_com_set_admin_polling_mode(ena_dev, true);
+
+ /* Get Device Attributes */
+ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
+ if (rc) {
+ dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
+ goto err_admin_init;
+ }
+
+ /* Try to turn on all the available aenq groups */
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
+ goto err_admin_init;
+ }
+
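+ /* Arm the keep-alive watchdog only if the device actually
+ * supports the keep-alive AENQ group; otherwise the driver would
+ * reset a healthy device that never sends the event.
+ */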
+ *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
+ ena_config_host_info(ena_dev);
+
+ return 0;
+
+err_admin_init:
+ ena_com_admin_destroy(ena_dev);
+err_mmio_read_less:
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ return rc;
+}
+
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
+ int io_vectors)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct device *dev = &adapter->pdev->dev;
+ int rc;
+
+ rc = ena_enable_msix(adapter, io_vectors);
+ if (rc) {
+ dev_err(dev, "Can not reserve msix vectors\n");
+ return rc;
+ }
+
+ ena_setup_mgmnt_intr(adapter);
+
+ rc = ena_request_mgmnt_irq(adapter);
+ if (rc) {
+ dev_err(dev, "Can not setup management interrupts\n");
+ goto err_disable_msix;
+ }
+
+ ena_com_set_admin_polling_mode(ena_dev, false);
+
+ ena_com_admin_aenq_enable(ena_dev);
+
+ return 0;
+
+err_disable_msix:
+ ena_disable_msix(adapter);
+
+ return rc;
+}
+
+static void ena_fw_reset_device(struct work_struct *work)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, reset_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct pci_dev *pdev = adapter->pdev;
+ bool dev_up, wd_state;
+ int rc;
+
+ del_timer_sync(&adapter->timer_service);
+
+ rtnl_lock();
+
+ dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ /* After calling ena_close the tx queues and the napi
+ * are disabled so no one can interfere or touch the
+ * data structures
+ */
+ ena_close(netdev);
+
+ rc = ena_com_dev_reset(ena_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Device reset failed\n");
+ goto err;
+ }
+
+ ena_free_mgmnt_irq(adapter);
+
+ ena_disable_msix(adapter);
+
+ ena_com_abort_admin_commands(ena_dev);
+
+ ena_com_wait_for_abort_completion(ena_dev);
+
+ ena_com_admin_destroy(ena_dev);
+
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ /* Finish with the destroy part. Start the init part */
+
+ rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ dev_err(&pdev->dev, "Can not initialize device\n");
+ goto err;
+ }
+ adapter->wd_state = wd_state;
+
+ rc = ena_device_validate_params(adapter, &get_feat_ctx);
+ if (rc) {
+ dev_err(&pdev->dev, "Validation of device parameters failed\n");
+ goto err_device_destroy;
+ }
+
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter,
+ adapter->num_queues);
+ if (rc) {
+ dev_err(&pdev->dev, "Enable MSI-X failed\n");
+ goto err_device_destroy;
+ }
+ /* If the interface was up before the reset bring it up */
+ if (dev_up) {
+ rc = ena_up(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to create I/O queues\n");
+ goto err_disable_msix;
+ }
+ }
+
+ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+
+ rtnl_unlock();
+
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
+
+ return;
+err_disable_msix:
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+err_device_destroy:
+ ena_com_admin_destroy(ena_dev);
+err:
+ rtnl_unlock();
+
+ dev_err(&pdev->dev,
+ "Reset attempt failed. Can not reset the device\n");
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
+ struct ena_tx_buffer *tx_buf;
+ unsigned long last_jiffies;
+ struct ena_ring *tx_ring;
+ int i, j, budget;
+ u32 missed_tx;
+
+ /* Ensure any device state change made in another process is visible */
+ smp_rmb();
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return;
+
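+ /* Scanning every ring on every timer tick would be expensive, so
+ * check at most ENA_MONITORED_TX_QUEUES rings per invocation and
+ * resume from last_monitored_tx_qid on the next one.
+ */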
+ budget = ENA_MONITORED_TX_QUEUES;
+
+ for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+
+ for (j = 0; j < tx_ring->ring_size; j++) {
+ tx_buf = &tx_ring->tx_buffer_info[j];
+ last_jiffies = tx_buf->last_jiffies;
+ if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+ tx_ring->qid, j);
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ missed_tx = tx_ring->tx_stats.missing_tx_comp++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ /* Clear last jiffies so the lost buffer won't
+ * be counted twice.
+ */
+ tx_buf->last_jiffies = 0;
+
+ if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+ netif_err(adapter, tx_err, adapter->netdev,
+ "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
+ missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+ }
+ }
+
+ budget--;
+ if (!budget)
+ break;
+ }
+
+ adapter->last_monitored_tx_qid = i % adapter->num_queues;
+}
+
+/* Check for keep alive expiration */
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ unsigned long keep_alive_expired;
+
+ if (!adapter->wd_state)
+ return;
+
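+ /* The device is expected to send a keep-alive event every second;
+ * declare it expired only after ENA_DEVICE_KALIVE_TIMEOUT (3 sec)
+ * without an event, to tolerate scheduling jitter.
+ */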
+ keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies
+ + ENA_DEVICE_KALIVE_TIMEOUT);
+ if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Keep alive watchdog timeout.\n");
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.wd_expired++;
+ u64_stats_update_end(&adapter->syncp);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+}
+
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "ENA admin queue is not in running state!\n");
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.admin_q_pause++;
+ u64_stats_update_end(&adapter->syncp);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+}
+
+static void ena_update_host_info(struct ena_admin_host_info *host_info,
+ struct net_device *netdev)
+{
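+ /* netdev->features is a 64-bit bitmap while the host info struct
+ * exposes it as an array of 32-bit words; split it accordingly.
+ */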
+ host_info->supported_network_features[0] =
+ netdev->features & GENMASK_ULL(31, 0);
+ host_info->supported_network_features[1] =
+ (netdev->features & GENMASK_ULL(63, 32)) >> 32;
+}
+
+static void ena_timer_service(unsigned long data)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
+ struct ena_admin_host_info *host_info =
+ adapter->ena_dev->host_attr.host_info;
+
+ check_for_missing_keep_alive(adapter);
+
+ check_for_admin_com_state(adapter);
+
+ check_for_missing_tx_completions(adapter);
+
+ if (debug_area)
+ ena_dump_stats_to_buf(adapter, debug_area);
+
+ if (host_info)
+ ena_update_host_info(host_info, adapter->netdev);
+
+ if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Trigger reset is on\n");
+ ena_dump_stats_to_dmesg(adapter);
+ queue_work(ena_wq, &adapter->reset_task);
+ return;
+ }
+
+ /* Reset the timer */
+ mod_timer(&adapter->timer_service, jiffies + HZ);
+}
+
+static int ena_calc_io_queue_num(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_queue_num;
+
+ /* In case of LLQ use the llq number in the get feature cmd */
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq_num = get_feat_ctx->max_queues.max_llq_num;
+
+ if (io_sq_num == 0) {
+ dev_err(&pdev->dev,
+ "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
+
+ ena_dev->tx_mem_queue_type =
+ ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ }
+ } else {
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ }
+
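+ /* The number of IO queues is the minimum of what the host can use
+ * (one queue per possible CPU, capped at the driver maximum) and
+ * what the device and the PCI function provide (SQs, CQs and
+ * MSI-X vectors, minus the one vector reserved for management).
+ */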
+ io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES);
+ io_queue_num = min_t(int, io_queue_num, io_sq_num);
+ io_queue_num = min_t(int, io_queue_num,
+ get_feat_ctx->max_queues.max_cq_num);
+ /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX IO queue pair */
+ io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
+ if (unlikely(!io_queue_num)) {
+ dev_err(&pdev->dev, "The device doesn't have io queues\n");
+ return -EFAULT;
+ }
+
+ return io_queue_num;
+}
+
+static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ bool has_mem_bar;
+
+ has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
+
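+ /* In LLQ ("push") mode the driver writes TX descriptors and packet
+ * headers directly into device memory through the memory BAR
+ * instead of having the device fetch them with DMA.
+ */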
+ /* Enable push mode if device supports LLQ */
+ if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+ else
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+}
+
+static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
+ struct net_device *netdev)
+{
+ netdev_features_t dev_features = 0;
+
+ /* Set offload features */
+ if (feat->offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+ dev_features |= NETIF_F_IP_CSUM;
+
+ if (feat->offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
+ dev_features |= NETIF_F_IPV6_CSUM;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ dev_features |= NETIF_F_TSO;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
+ dev_features |= NETIF_F_TSO6;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
+ dev_features |= NETIF_F_TSO_ECN;
+
+ if (feat->offload.rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+ dev_features |= NETIF_F_RXCSUM;
+
+ if (feat->offload.rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+ dev_features |= NETIF_F_RXCSUM;
+
+ netdev->features =
+ dev_features |
+ NETIF_F_SG |
+ NETIF_F_NTUPLE |
+ NETIF_F_RXHASH |
+ NETIF_F_HIGHDMA;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+}
+
+static void ena_set_conf_feat_params(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *feat)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* Copy mac address */
+ if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
+ } else {
+ ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
+ ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ }
+
+ /* Set offload features */
+ ena_set_dev_offloads(feat, netdev);
+
+ adapter->max_mtu = feat->dev_attr.max_mtu;
+}
+
+static int ena_rss_init_default(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct device *dev = &adapter->pdev->dev;
+ int rc, i;
+ u32 val;
+
+ rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
+ if (unlikely(rc)) {
+ dev_err(dev, "Cannot init indirect table\n");
+ goto err_rss_init;
+ }
+
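+ /* Spread the indirection table entries across the IO queues using
+ * the default round-robin pattern provided by
+ * ethtool_rxfh_indir_default().
+ */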
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ val = ethtool_rxfh_indir_default(i, adapter->num_queues);
+ rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ ENA_IO_RXQ_IDX(val));
+ if (unlikely(rc && (rc != -EPERM))) {
+ dev_err(dev, "Cannot fill indirect table\n");
+ goto err_fill_indir;
+ }
+ }
+
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ if (unlikely(rc && (rc != -EPERM))) {
+ dev_err(dev, "Cannot fill hash function\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_set_default_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != -EPERM))) {
+ dev_err(dev, "Cannot fill hash control\n");
+ goto err_fill_indir;
+ }
+
+ return 0;
+
+err_fill_indir:
+ ena_com_rss_destroy(ena_dev);
+err_rss_init:
+
+ return rc;
+}
+
+static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
+{
+ int release_bars;
+
+ release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
+ pci_release_selected_regions(pdev, release_bars);
+}
+
+static int ena_calc_queue_size(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ u16 *max_tx_sgl_size,
+ u16 *max_rx_sgl_size,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ u32 queue_size = ENA_DEFAULT_RING_SIZE;
+
+ queue_size = min_t(u32, queue_size,
+ get_feat_ctx->max_queues.max_cq_depth);
+ queue_size = min_t(u32, queue_size,
+ get_feat_ctx->max_queues.max_sq_depth);
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ queue_size = min_t(u32, queue_size,
+ get_feat_ctx->max_queues.max_llq_depth);
+
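+ /* Ring indices are advanced with an "& (ring_size - 1)" mask (see
+ * the ENA_*_RING_IDX_NEXT macros), which is only correct for
+ * power-of-two sizes, so round the queue size down.
+ */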
+ queue_size = rounddown_pow_of_two(queue_size);
+
+ if (unlikely(!queue_size)) {
+ dev_err(&pdev->dev, "Invalid queue size\n");
+ return -EFAULT;
+ }
+
+ *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_tx_descs);
+ *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_rx_descs);
+
+ return queue_size;
+}
+
+/* ena_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ena_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ena_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ static int version_printed;
+ struct net_device *netdev;
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev = NULL;
+ static int adapters_found;
+ int io_queue_num, bars, rc;
+ int queue_size;
+ u16 tx_sgl_size = 0;
+ u16 rx_sgl_size = 0;
+ bool wd_state;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ if (version_printed++ == 0)
+ dev_info(&pdev->dev, "%s", version);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ ena_dev = vzalloc(sizeof(*ena_dev));
+ if (!ena_dev) {
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
+ rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
+ rc);
+ goto err_free_ena_dev;
+ }
+
+ ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
+ pci_resource_len(pdev, ENA_REG_BAR));
+ if (!ena_dev->reg_bar) {
+ dev_err(&pdev->dev, "failed to remap regs bar\n");
+ rc = -EFAULT;
+ goto err_free_region;
+ }
+
+ ena_dev->dmadev = &pdev->dev;
+
+ rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ dev_err(&pdev->dev, "ena device init failed\n");
+ if (rc == -ETIME)
+ rc = -EPROBE_DEFER;
+ goto err_free_region;
+ }
+
+ ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
+ if (!ena_dev->mem_bar) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
+ }
+
+ /* Initial TX interrupt delay. Assumes 1 usec granularity.
+ * Updated during device initialization with the real granularity.
+ */
+ ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
+ io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
+ queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
+ &rx_sgl_size, &get_feat_ctx);
+ if ((queue_size <= 0) || (io_queue_num <= 0)) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
+
+ dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
+ io_queue_num, queue_size);
+
+ /* dev zeroed in alloc_etherdev_mq */
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
+ rc = -ENOMEM;
+ goto err_device_destroy;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
+
+ adapter->ena_dev = ena_dev;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ ena_set_conf_feat_params(adapter, &get_feat_ctx);
+
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ adapter->tx_ring_size = queue_size;
+ adapter->rx_ring_size = queue_size;
+
+ adapter->max_tx_sgl_size = tx_sgl_size;
+ adapter->max_rx_sgl_size = rx_sgl_size;
+
+ adapter->num_queues = io_queue_num;
+ adapter->last_monitored_tx_qid = 0;
+
+ adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
+ adapter->wd_state = wd_state;
+
+ snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
+
+ rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to query interrupt moderation feature\n");
+ goto err_netdev_destroy;
+ }
+ ena_init_io_rings(adapter);
+
+ netdev->netdev_ops = &ena_netdev_ops;
+ netdev->watchdog_timeo = TX_TIMEOUT;
+ ena_set_ethtool_ops(netdev);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ u64_stats_init(&adapter->syncp);
+
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to enable and set the admin interrupts\n");
+ goto err_worker_destroy;
+ }
+ rc = ena_rss_init_default(adapter);
+ if (rc && (rc != -EPERM)) {
+ dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
+ goto err_free_msix;
+ }
+
+ ena_config_debug_area(adapter);
+
+ memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
+
+ netif_carrier_off(netdev);
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto err_rss;
+ }
+
+ INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend);
+ INIT_WORK(&adapter->resume_io_task, ena_device_io_resume);
+ INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
+
+ adapter->last_keep_alive_jiffies = jiffies;
+
+ init_timer(&adapter->timer_service);
+ adapter->timer_service.expires = round_jiffies(jiffies + HZ);
+ adapter->timer_service.function = ena_timer_service;
+ adapter->timer_service.data = (unsigned long)adapter;
+
+ add_timer(&adapter->timer_service);
+
+ dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+ DEVICE_NAME, (long)pci_resource_start(pdev, 0),
+ netdev->dev_addr, io_queue_num);
+
+ set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ adapters_found++;
+
+ return 0;
+
+err_rss:
+ ena_com_delete_debug_area(ena_dev);
+ ena_com_rss_destroy(ena_dev);
+err_free_msix:
+ ena_com_dev_reset(ena_dev);
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+err_worker_destroy:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ del_timer(&adapter->timer_service);
+ cancel_work_sync(&adapter->suspend_io_task);
+ cancel_work_sync(&adapter->resume_io_task);
+err_netdev_destroy:
+ free_netdev(netdev);
+err_device_destroy:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+err_free_region:
+ ena_release_bars(ena_dev, pdev);
+err_free_ena_dev:
+ vfree(ena_dev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+/*****************************************************************************/
+static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
+{
+ int rc;
+
+ if (numvfs > 0) {
+ rc = pci_enable_sriov(dev, numvfs);
+ if (rc != 0) {
+ dev_err(&dev->dev,
+ "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
+ numvfs, rc);
+ return rc;
+ }
+
+ return numvfs;
+ }
+
+ if (numvfs == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* ena_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void ena_remove(struct pci_dev *pdev)
+{
+ struct ena_adapter *adapter = pci_get_drvdata(pdev);
+ struct ena_com_dev *ena_dev;
+ struct net_device *netdev;
+
+ if (!adapter)
+ /* This device didn't load properly and its resources
+ * were already released, nothing to do
+ */
+ return;
+
+ ena_dev = adapter->ena_dev;
+ netdev = adapter->netdev;
+
+#ifdef CONFIG_RFS_ACCEL
+ if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+ }
+#endif /* CONFIG_RFS_ACCEL */
+
+ unregister_netdev(netdev);
+ del_timer_sync(&adapter->timer_service);
+
+ cancel_work_sync(&adapter->reset_task);
+
+ cancel_work_sync(&adapter->suspend_io_task);
+
+ cancel_work_sync(&adapter->resume_io_task);
+
+ ena_com_dev_reset(ena_dev);
+
+ ena_free_mgmnt_irq(adapter);
+
+ ena_disable_msix(adapter);
+
+ free_netdev(netdev);
+
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ ena_com_abort_admin_commands(ena_dev);
+
+ ena_com_wait_for_abort_completion(ena_dev);
+
+ ena_com_admin_destroy(ena_dev);
+
+ ena_com_rss_destroy(ena_dev);
+
+ ena_com_delete_debug_area(ena_dev);
+
+ ena_com_delete_host_info(ena_dev);
+
+ ena_release_bars(ena_dev, pdev);
+
+ pci_disable_device(pdev);
+
+ ena_com_destroy_interrupt_moderation(ena_dev);
+
+ vfree(ena_dev);
+}
+
+static struct pci_driver ena_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = ena_pci_tbl,
+ .probe = ena_probe,
+ .remove = ena_remove,
+ .sriov_configure = ena_sriov_configure,
+};
+
+static int __init ena_init(void)
+{
+ pr_info("%s", version);
+
+ ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
+ if (!ena_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return pci_register_driver(&ena_pci_driver);
+}
+
+static void __exit ena_cleanup(void)
+{
+ pci_unregister_driver(&ena_pci_driver);
+
+ if (ena_wq) {
+ destroy_workqueue(ena_wq);
+ ena_wq = NULL;
+ }
+}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+/* ena_update_on_link_change:
+ * Notify the network interface about the change in link status
+ */
+static void ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_aenq_link_change_desc *aenq_desc =
+ (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ int status = aenq_desc->flags &
+ ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+
+ if (status) {
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+ set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
+ netif_carrier_on(adapter->netdev);
+ } else {
+ clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
+ netif_carrier_off(adapter->netdev);
+ }
+}
+
+static void ena_keep_alive_wd(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ adapter->last_keep_alive_jiffies = jiffies;
+}
+
+static void ena_notification(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
+ "Invalid group(%x) expected %x\n",
+ aenq_e->aenq_common_desc.group,
+ ENA_ADMIN_NOTIFICATION);
+
+ switch (aenq_e->aenq_common_desc.syndrom) {
+ case ENA_ADMIN_SUSPEND:
+ /* Suspend just the IO queues.
+ * We deliberately don't suspend admin so the timer and
+ * the keep_alive events should remain.
+ */
+ queue_work(ena_wq, &adapter->suspend_io_task);
+ break;
+ case ENA_ADMIN_RESUME:
+ queue_work(ena_wq, &adapter->resume_io_task);
+ break;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "Invalid aenq notification link state %d\n",
+ aenq_e->aenq_common_desc.syndrom);
+ }
+}
+
+/* This handler will be called for any unknown event group or unimplemented handler */
+static void unimplemented_aenq_handler(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+
+ netif_err(adapter, drv, adapter->netdev,
+ "Unknown event was received or event with unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_NOTIFICATION] = ena_notification,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
+
+module_init(ena_init);
+module_exit(ena_cleanup);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
new file mode 100644
index 000000000000..69d7e9ed5bc8
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_H
+#define ENA_H
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+#include "ena_com.h"
+#include "ena_eth_com.h"
+
+#define DRV_MODULE_VER_MAJOR 1
+#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_SUBMINOR 2
+
+#define DRV_MODULE_NAME "ena"
+#ifndef DRV_MODULE_VERSION
+#define DRV_MODULE_VERSION \
+ __stringify(DRV_MODULE_VER_MAJOR) "." \
+ __stringify(DRV_MODULE_VER_MINOR) "." \
+ __stringify(DRV_MODULE_VER_SUBMINOR)
+#endif
+
+#define DEVICE_NAME "Elastic Network Adapter (ENA)"
+
+/* 1 for AENQ + ADMIN */
+#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues))
+
+#define ENA_REG_BAR 0
+#define ENA_MEM_BAR 2
+#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR))
+
+#define ENA_DEFAULT_RING_SIZE (1024)
+
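+/* Wake a stopped TX queue only once there is room for a worst-case
+ * packet: MAX_SKB_FRAGS fragments plus extra descriptors for the
+ * linear part and metadata.
+ */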
+#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
+#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN)
+
+/* limit the buffer size to 600 bytes to handle MTU changes from very
+ * small to very large, in which case the number of buffers per packet
+ * could exceed ENA_PKT_MAX_BUFS
+ */
+#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600
+
+#define ENA_MIN_MTU 128
+
+#define ENA_NAME_MAX_LEN 20
+#define ENA_IRQNAME_SIZE 40
+
+#define ENA_PKT_MAX_BUFS 19
+
+#define ENA_RX_RSS_TABLE_LOG_SIZE 7
+#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
+
+#define ENA_HASH_KEY_SIZE 40
+
+/* The number of tx packet completions that will be handled each NAPI poll
+ * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER.
+ */
+#define ENA_TX_POLL_BUDGET_DIVIDER 4
+
+/* Refill Rx queue when number of available descriptors is below
+ * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER
+ */
+#define ENA_RX_REFILL_THRESH_DIVIDER 8
+
+/* Number of queues to check for missing tx completions per timer service */
+#define ENA_MONITORED_TX_QUEUES 4
+/* Max timeout packets before device reset */
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+
+#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+
+#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \
+ (((idx) + (n)) & ((ring_size) - 1))
+
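+/* IO queues are interleaved: even indices are TX queues and odd
+ * indices are RX queues, so queue pair q maps to TXQ 2q and
+ * RXQ 2q + 1.
+ */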
+#define ENA_IO_TXQ_IDX(q) (2 * (q))
+#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+
+#define ENA_MGMNT_IRQ_IDX 0
+#define ENA_IO_IRQ_FIRST_IDX 1
+#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
+
+/* ENA device should send keep alive msg every 1 sec.
+ * We wait for 3 sec just to be on the safe side.
+ */
+#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
+
+#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+
+struct ena_irq {
+ irq_handler_t handler;
+ void *data;
+ int cpu;
+ u32 vector;
+ cpumask_t affinity_hint_mask;
+ char name[ENA_IRQNAME_SIZE];
+};
+
+struct ena_napi {
+ struct napi_struct napi ____cacheline_aligned;
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+ u32 qid;
+};
+
+struct ena_tx_buffer {
+ struct sk_buff *skb;
+ /* num of ena desc for this specific skb
+ * (includes data desc and metadata desc)
+ */
+ u32 tx_descs;
+ /* num of buffers used by this skb */
+ u32 num_of_bufs;
+ /* Save the last jiffies to detect missing tx packets */
+ unsigned long last_jiffies;
+ struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
+} ____cacheline_aligned;
+
+struct ena_rx_buffer {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 page_offset;
+ struct ena_com_buf ena_buf;
+} ____cacheline_aligned;
+
+struct ena_stats_tx {
+ u64 cnt;
+ u64 bytes;
+ u64 queue_stop;
+ u64 prepare_ctx_err;
+ u64 queue_wakeup;
+ u64 dma_mapping_err;
+ u64 linearize;
+ u64 linearize_failed;
+ u64 napi_comp;
+ u64 tx_poll;
+ u64 doorbells;
+ u64 missing_tx_comp;
+ u64 bad_req_id;
+};
+
+struct ena_stats_rx {
+ u64 cnt;
+ u64 bytes;
+ u64 refil_partial;
+ u64 bad_csum;
+ u64 page_alloc_fail;
+ u64 skb_alloc_fail;
+ u64 dma_mapping_err;
+ u64 bad_desc_num;
+ u64 rx_copybreak_pkt;
+};
+
+struct ena_ring {
+ /* Holds the empty requests for TX out-of-order completions */
+ u16 *free_tx_ids;
+ union {
+ struct ena_tx_buffer *tx_buffer_info;
+ struct ena_rx_buffer *rx_buffer_info;
+ };
+
+ /* cache ptr to avoid using the adapter */
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct napi_struct *napi;
+ struct net_device *netdev;
+ struct ena_com_dev *ena_dev;
+ struct ena_adapter *adapter;
+ struct ena_com_io_cq *ena_com_io_cq;
+ struct ena_com_io_sq *ena_com_io_sq;
+
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 rx_copybreak;
+ u16 qid;
+ u16 mtu;
+ u16 sgl_size;
+
+ /* The maximum header length the device can handle */
+ u8 tx_max_header_size;
+
+ /* cpu for TPH */
+ int cpu;
+ /* number of tx/rx_buffer_info's entries */
+ int ring_size;
+
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+
+ struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
+ u32 smoothed_interval;
+ u32 per_napi_packets;
+ u32 per_napi_bytes;
+ enum ena_intr_moder_level moder_tbl_idx;
+ struct u64_stats_sync syncp;
+ union {
+ struct ena_stats_tx tx_stats;
+ struct ena_stats_rx rx_stats;
+ };
+} ____cacheline_aligned;
+
+struct ena_stats_dev {
+ u64 tx_timeout;
+ u64 io_suspend;
+ u64 io_resume;
+ u64 wd_expired;
+ u64 interface_up;
+ u64 interface_down;
+ u64 admin_q_pause;
+};
+
+enum ena_flags_t {
+ ENA_FLAG_DEVICE_RUNNING,
+ ENA_FLAG_DEV_UP,
+ ENA_FLAG_LINK_UP,
+ ENA_FLAG_MSIX_ENABLED,
+ ENA_FLAG_TRIGGER_RESET
+};
+
+/* adapter specific private data structure */
+struct ena_adapter {
+ struct ena_com_dev *ena_dev;
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* rx packets shorter than this len will be copied to the skb
+ * header
+ */
+ u32 rx_copybreak;
+ u32 max_mtu;
+
+ int num_queues;
+
+ struct msix_entry *msix_entries;
+ int msix_vecs;
+
+ u32 tx_usecs, rx_usecs; /* interrupt moderation */
+ u32 tx_frames, rx_frames; /* interrupt moderation */
+
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+
+ u32 msg_enable;
+
+ u16 max_tx_sgl_size;
+ u16 max_rx_sgl_size;
+
+ u8 mac_addr[ETH_ALEN];
+
+ char name[ENA_NAME_MAX_LEN];
+
+ unsigned long flags;
+ /* TX */
+ struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
+ ____cacheline_aligned_in_smp;
+
+ /* RX */
+ struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES]
+ ____cacheline_aligned_in_smp;
+
+ struct ena_napi ena_napi[ENA_MAX_NUM_IO_QUEUES];
+
+ struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)];
+
+ /* timer service */
+ struct work_struct reset_task;
+ struct work_struct suspend_io_task;
+ struct work_struct resume_io_task;
+ struct timer_list timer_service;
+
+ bool wd_state;
+ unsigned long last_keep_alive_jiffies;
+
+ struct u64_stats_sync syncp;
+ struct ena_stats_dev dev_stats;
+
+ /* last queue index that was checked for uncompleted tx packets */
+ u32 last_monitored_tx_qid;
+};
+
+void ena_set_ethtool_ops(struct net_device *netdev);
+
+void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
+
+void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
+
+int ena_get_sset_count(struct net_device *netdev, int sset);
+
+#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
new file mode 100644
index 000000000000..f80d2a47fa94
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_PCI_ID_TBL_H_
+#define ENA_PCI_ID_TBL_H_
+
+#ifndef PCI_VENDOR_ID_AMAZON
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#endif
+
+#ifndef PCI_DEV_ID_ENA_PF
+#define PCI_DEV_ID_ENA_PF 0x0ec2
+#endif
+
+#ifndef PCI_DEV_ID_ENA_LLQ_PF
+#define PCI_DEV_ID_ENA_LLQ_PF 0x1ec2
+#endif
+
+#ifndef PCI_DEV_ID_ENA_VF
+#define PCI_DEV_ID_ENA_VF 0xec20
+#endif
+
+#ifndef PCI_DEV_ID_ENA_LLQ_VF
+#define PCI_DEV_ID_ENA_LLQ_VF 0xec21
+#endif
+
+#define ENA_PCI_ID_TABLE_ENTRY(devid) \
+ {PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)},
+
+static const struct pci_device_id ena_pci_tbl[] = {
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF)
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF)
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF)
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_VF)
+ { }
+};
+
+#endif /* ENA_PCI_ID_TBL_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
new file mode 100644
index 000000000000..26097a2b6030
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+
+#endif /*_ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index dcf2a1f3643d..dc57f2759f44 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -45,14 +45,14 @@
#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
-#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#if IS_ENABLED(CONFIG_HPLANCE)
#include "hplance.h"
#undef WRITERAP
#undef WRITERDP
#undef READRDP
-#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME147_NET)
/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x) (lp->writerap(lp, x))
@@ -86,7 +86,7 @@ static inline __u16 READRDP(struct lance_private *lp)
}
#endif
-#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+#endif /* IS_ENABLED(CONFIG_HPLANCE) */
/* debugging output macros, various flavours */
/* #define TEST_HITS */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 94960055fa1f..f92cc97151ec 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -89,7 +89,7 @@ Revision History:
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a9b2709567ec..7f9216db026f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1708,9 +1708,9 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_features = xgbe_set_features,
};
-struct net_device_ops *xgbe_get_netdev_ops(void)
+const struct net_device_ops *xgbe_get_netdev_ops(void)
{
- return (struct net_device_ops *)&xgbe_netdev_ops;
+ return &xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 11d9f0c5b78b..4007b429c80c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -623,7 +623,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_ts_info = xgbe_get_ts_info,
};
-struct ethtool_ops *xgbe_get_ethtool_ops(void)
+const struct ethtool_ops *xgbe_get_ethtool_ops(void)
{
- return (struct ethtool_ops *)&xgbe_ethtool_ops;
+ return &xgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 3eee3201b58f..9de078819aa6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -861,9 +861,15 @@ static int xgbe_resume(struct device *dev)
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+ /* Schedule a restart in case the link or phy state changed
+ * while we were powered down.
+ */
+ schedule_work(&pdata->restart_work);
+ }
+
DBGPR("<--xgbe_resume\n");
return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 98d9d63c4353..5dd17dcea2f8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -956,8 +956,9 @@ struct xgbe_prv_data {
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
-struct net_device_ops *xgbe_get_netdev_ops(void);
-struct ethtool_ops *xgbe_get_ethtool_ops(void);
+const struct net_device_ops *xgbe_get_netdev_ops(void);
+const struct ethtool_ops *xgbe_get_ethtool_ops(void);
+
#ifdef CONFIG_AMD_XGBE_DCB
const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
#endif
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 300e3b5c54e0..afccb033177b 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -4,6 +4,7 @@ config NET_XGENE
depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
select MDIO_XGENE
+ select GPIOLIB
help
This is the Ethernet driver for the on-chip ethernet interface on the
APM X-Gene SoC.
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 472c0fb3f4c4..23d72af83d82 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -32,12 +32,19 @@ static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
SET_VAL(SB_HDRLEN, len);
}
-static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
+static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
+ u32 dstqid, u32 fpsel,
u32 nfpsel, u32 *idt_reg)
{
- *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
- SET_VAL(IDT_FPSEL, fpsel) |
- SET_VAL(IDT_NFPSEL, nfpsel);
+ if (pdata->enet_id == XGENE_ENET1) {
+ *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+ SET_VAL(IDT_FPSEL1, fpsel) |
+ SET_VAL(IDT_NFPSEL1, nfpsel);
+ } else {
+ *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+ SET_VAL(IDT_FPSEL, fpsel) |
+ SET_VAL(IDT_NFPSEL, nfpsel);
+ }
}
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
@@ -344,7 +351,7 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
nfpsel = 0;
idt_reg = 0;
- xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg);
+ xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
RSS_IDT, CLE_CMD_WR);
if (ret)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 33c5f6b25824..9ac9f8e145ec 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -196,9 +196,13 @@ enum xgene_cle_ptree_dbptrs {
#define IDT_DSTQID_POS 0
#define IDT_DSTQID_LEN 12
#define IDT_FPSEL_POS 12
-#define IDT_FPSEL_LEN 4
-#define IDT_NFPSEL_POS 16
-#define IDT_NFPSEL_LEN 4
+#define IDT_FPSEL_LEN 5
+#define IDT_NFPSEL_POS 17
+#define IDT_NFPSEL_LEN 5
+#define IDT_FPSEL1_POS 12
+#define IDT_FPSEL1_LEN 4
+#define IDT_NFPSEL1_POS 16
+#define IDT_NFPSEL1_LEN 4
struct xgene_cle_ptree_branch {
bool valid;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 22a7b26ca1d6..d372d4235c81 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -54,55 +54,68 @@ static void xgene_get_drvinfo(struct net_device *ndev,
sprintf(info->bus_info, "%s", pdev->name);
}
-static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
+ u32 supported;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
if (phydev == NULL)
return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (pdata->mdio_driver) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
- cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_MII;
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
+ supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising,
+ supported);
+
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
+ supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising,
+ supported);
+
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
}
return 0;
}
-static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
@@ -110,7 +123,7 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
}
@@ -152,12 +165,12 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
static const struct ethtool_ops xgene_ethtool_ops = {
.get_drvinfo = xgene_get_drvinfo,
- .get_settings = xgene_get_settings,
- .set_settings = xgene_set_settings,
.get_link = ethtool_op_get_link,
.get_strings = xgene_get_strings,
.get_sset_count = xgene_get_sset_count,
- .get_ethtool_stats = xgene_get_ethtool_stats
+ .get_ethtool_stats = xgene_get_ethtool_stats,
+ .get_link_ksettings = xgene_get_link_ksettings,
+ .set_link_ksettings = xgene_set_link_ksettings,
};
void xgene_enet_set_ethtool_ops(struct net_device *ndev)
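The conversion above is part of the tree-wide move from get_settings/set_settings to the link_ksettings API. Drivers whose link state always comes from phylib do not even need wrappers; the generic helpers plug straight into ethtool_ops (real kernel API, shown as an illustrative ops table -- xgene cannot use it because its SGMII/XFI paths may have no phydev):

    static const struct ethtool_ops example_ethtool_ops = {
            .get_link           = ethtool_op_get_link,
            .get_link_ksettings = phy_ethtool_get_link_ksettings,
            .set_link_ksettings = phy_ethtool_set_link_ksettings,
    };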
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 18bb9556dd00..c481f104a8fe 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -713,7 +713,7 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (phydev->link) {
if (pdata->phy_speed != phydev->speed) {
@@ -761,31 +761,25 @@ int xgene_enet_phy_connect(struct net_device *ndev)
if (dev->of_node) {
for (i = 0 ; i < 2; i++) {
np = of_parse_phandle(dev->of_node, "phy-handle", i);
- if (np)
+ phy_dev = of_phy_connect(ndev, np,
+ &xgene_enet_adjust_link,
+ 0, pdata->phy_mode);
+ of_node_put(np);
+ if (phy_dev)
break;
}
- if (!np) {
- netdev_dbg(ndev, "No phy-handle found in DT\n");
- return -ENODEV;
- }
-
- phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link,
- 0, pdata->phy_mode);
- of_node_put(np);
if (!phy_dev) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
-
- pdata->phy_dev = phy_dev;
} else {
#ifdef CONFIG_ACPI
struct acpi_device *adev = acpi_phy_find_device(dev);
if (adev)
- pdata->phy_dev = adev->driver_data;
-
- phy_dev = pdata->phy_dev;
+ phy_dev = adev->driver_data;
+ else
+ phy_dev = NULL;
if (!phy_dev ||
phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
@@ -853,8 +847,6 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (!phy)
return -EIO;
- pdata->phy_dev = phy;
-
return ret;
}
@@ -894,14 +886,18 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
- if (pdata->phy_dev)
- phy_disconnect(pdata->phy_dev);
+ struct net_device *ndev = pdata->ndev;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
}
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
- if (pdata->phy_dev)
- phy_disconnect(pdata->phy_dev);
+ struct net_device *ndev = pdata->ndev;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
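The recurring change in this file: the driver-private pdata->phy_dev pointer is dropped in favor of the phydev that phylib already stores in struct net_device at connect time. Teardown then keys off the canonical pointer, roughly (illustrative helper mirroring the two functions above):

    static void example_phy_disconnect(struct xgene_enet_pdata *pdata)
    {
            struct net_device *ndev = pdata->ndev;

            /* ndev->phydev is set by (of_)phy_connect() and stays
             * valid until phy_disconnect(); no shadow copy needed. */
            if (ndev->phydev)
                    phy_disconnect(ndev->phydev);
    }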
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 179a44dceb29..8456337a237d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -124,6 +124,12 @@ enum xgene_enet_rm {
#define MAC_READ_REG_OFFSET 0x0c
#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+#define PCS_ADDR_REG_OFFSET 0x00
+#define PCS_COMMAND_REG_OFFSET 0x04
+#define PCS_WRITE_REG_OFFSET 0x08
+#define PCS_READ_REG_OFFSET 0x0c
+#define PCS_COMMAND_DONE_REG_OFFSET 0x10
+
#define MII_MGMT_CONFIG_ADDR 0x20
#define MII_MGMT_COMMAND_ADDR 0x24
#define MII_MGMT_ADDRESS_ADDR 0x28
@@ -231,6 +237,8 @@ enum xgene_enet_rm {
#define TCPHDR_LEN 6
#define IPHDR_POS 6
#define IPHDR_LEN 6
+#define MSS_POS 20
+#define MSS_LEN 2
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
#define ET_POS 23 /* Enable TSO */
@@ -247,6 +255,11 @@ enum xgene_enet_rm {
#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
+#define TSO_MSS0_POS 0
+#define TSO_MSS0_LEN 14
+#define TSO_MSS1_POS 16
+#define TSO_MSS1_LEN 14
+
struct xgene_enet_raw_desc {
__le64 m0;
__le64 m1;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d1d6b5eeb613..429f18fc5503 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
@@ -72,7 +73,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
skb = netdev_alloc_skb_ip_align(ndev, len);
if (unlikely(!skb))
return -ENOMEM;
- buf_pool->rx_skb[tail] = skb;
dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
@@ -81,6 +81,8 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
return -EINVAL;
}
+ buf_pool->rx_skb[tail] = skb;
+
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
SET_VAL(BUFDATALEN, bufdatalen) |
SET_BIT(COHERENT));
@@ -102,12 +104,21 @@ static u8 xgene_enet_hdr_len(const void *data)
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
+ struct device *dev = ndev_to_dev(buf_pool->ndev);
+ struct xgene_enet_raw_desc16 *raw_desc;
+ dma_addr_t dma_addr;
int i;
/* Free up the buffers held by hardware */
for (i = 0; i < buf_pool->slots; i++) {
- if (buf_pool->rx_skb[i])
+ if (buf_pool->rx_skb[i]) {
dev_kfree_skb_any(buf_pool->rx_skb[i]);
+
+ raw_desc = &buf_pool->raw_desc16[i];
+ dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
+ DMA_FROM_DEVICE);
+ }
}
}
@@ -126,6 +137,7 @@ static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
struct xgene_enet_raw_desc *raw_desc)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
struct sk_buff *skb;
struct device *dev;
skb_frag_t *frag;
@@ -133,6 +145,7 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
u16 skb_index;
u8 status;
int i, ret = 0;
+ u8 mss_index;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -149,6 +162,13 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
DMA_TO_DEVICE);
}
+ if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
+ mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
+ spin_lock(&pdata->mss_lock);
+ pdata->mss_refcnt[mss_index]--;
+ spin_unlock(&pdata->mss_lock);
+ }
+
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -167,15 +187,53 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
return ret;
}
-static u64 xgene_enet_work_msg(struct sk_buff *skb)
+static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ bool mss_index_found = false;
+ int mss_index;
+ int i;
+
+ spin_lock(&pdata->mss_lock);
+
+ /* Reuse the slot if MSS matches */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (pdata->mss[i] == mss) {
+ pdata->mss_refcnt[i]++;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ /* Overwrite the slot with ref_count = 0 */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (!pdata->mss_refcnt[i]) {
+ pdata->mss_refcnt[i]++;
+ pdata->mac_ops->set_mss(pdata, mss, i);
+ pdata->mss[i] = mss;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ spin_unlock(&pdata->mss_lock);
+
+ /* No slots with ref_count = 0 available, return busy */
+ if (!mss_index_found)
+ return -EBUSY;
+
+ return mss_index;
+}
+
+static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
struct net_device *ndev = skb->dev;
struct iphdr *iph;
u8 l3hlen = 0, l4hlen = 0;
u8 ethhdr, proto = 0, csum_enable = 0;
- u64 hopinfo = 0;
u32 hdr_len, mss = 0;
u32 i, len, nr_frags;
+ int mss_index;
ethhdr = xgene_enet_hdr_len(skb->data);
@@ -215,7 +273,11 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
if (!mss || ((skb->len - hdr_len) <= mss))
goto out;
- hopinfo |= SET_BIT(ET);
+ mss_index = xgene_enet_setup_mss(ndev, mss);
+ if (unlikely(mss_index < 0))
+ return -EBUSY;
+
+ *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
}
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
@@ -223,15 +285,15 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- hopinfo |= SET_VAL(TCPHDR, l4hlen) |
- SET_VAL(IPHDR, l3hlen) |
- SET_VAL(ETHHDR, ethhdr) |
- SET_VAL(EC, csum_enable) |
- SET_VAL(IS, proto) |
- SET_BIT(IC) |
- SET_BIT(TYPE_ETH_WORK_MESSAGE);
-
- return hopinfo;
+ *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
+ SET_VAL(IPHDR, l3hlen) |
+ SET_VAL(ETHHDR, ethhdr) |
+ SET_VAL(EC, csum_enable) |
+ SET_VAL(IS, proto) |
+ SET_BIT(IC) |
+ SET_BIT(TYPE_ETH_WORK_MESSAGE);
+
+ return 0;
}
static u16 xgene_enet_encode_len(u16 len)
@@ -271,20 +333,22 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
skb_frag_t *frag;
u16 tail = tx_ring->tail;
- u64 hopinfo;
+ u64 hopinfo = 0;
u32 len, hw_len;
u8 ll = 0, nv = 0, idx = 0;
bool split = false;
u32 size, offset, ell_bytes = 0;
u32 i, fidx, nr_frags, count = 1;
+ int ret;
raw_desc = &tx_ring->raw_desc[tail];
tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- hopinfo = xgene_enet_work_msg(skb);
- if (!hopinfo)
- return -EINVAL;
+ ret = xgene_enet_work_msg(skb, &hopinfo);
+ if (ret)
+ return ret;
+
raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
hopinfo);
@@ -424,6 +488,9 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count == -EBUSY)
+ return NETDEV_TX_BUSY;
+
if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -452,7 +519,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
struct xgene_enet_raw_desc *raw_desc)
{
struct net_device *ndev;
- struct xgene_enet_pdata *pdata;
struct device *dev;
struct xgene_enet_desc_ring *buf_pool;
u32 datalen, skb_index;
@@ -461,7 +527,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
int ret = 0;
ndev = rx_ring->ndev;
- pdata = netdev_priv(ndev);
dev = ndev_to_dev(rx_ring->ndev);
buf_pool = rx_ring->buf_pool;
@@ -739,8 +804,8 @@ static int xgene_enet_open(struct net_device *ndev)
if (ret)
return ret;
- if (pdata->phy_dev) {
- phy_start(pdata->phy_dev);
+ if (ndev->phydev) {
+ phy_start(ndev->phydev);
} else {
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
netif_carrier_off(ndev);
@@ -763,8 +828,8 @@ static int xgene_enet_close(struct net_device *ndev)
mac_ops->tx_disable(pdata);
mac_ops->rx_disable(pdata);
- if (pdata->phy_dev)
- phy_stop(pdata->phy_dev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
else
cancel_delayed_work_sync(&pdata->link_work);
@@ -1312,6 +1377,18 @@ static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
return 0;
}
+static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ return;
+
+ pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
+ if (IS_ERR(pdata->sfp_rdy))
+ pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
+}
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -1401,6 +1478,8 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
+ xgene_enet_gpiod_get(pdata);
+
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
@@ -1425,6 +1504,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
+ pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
}
pdata->rx_buff_cnt = NUM_PKT_BUF;
@@ -1454,10 +1534,8 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
buf_pool = pdata->rx_ring[i]->buf_pool;
xgene_enet_init_bufpool(buf_pool);
ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
- if (ret) {
- xgene_enet_delete_desc_rings(pdata);
- return ret;
- }
+ if (ret)
+ goto err;
}
dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
@@ -1474,7 +1552,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
ret = pdata->cle_ops->cle_init(pdata);
if (ret) {
netdev_err(ndev, "Preclass Tree init error\n");
- return ret;
+ goto err;
}
} else {
pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
@@ -1484,6 +1562,10 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
pdata->mac_ops->init(pdata);
return ret;
+
+err:
+ xgene_enet_delete_desc_rings(pdata);
+ return ret;
}
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
@@ -1631,8 +1713,8 @@ static int xgene_enet_probe(struct platform_device *pdev)
}
#endif
if (!pdata->enet_id) {
- free_netdev(ndev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
ret = xgene_enet_get_resources(pdata);
@@ -1643,7 +1725,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO;
- pdata->mss = XGENE_ENET_MSS;
+ spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
@@ -1655,7 +1737,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
ret = xgene_enet_init_hw(pdata);
if (ret)
- goto err_netdev;
+ goto err;
link_state = pdata->mac_ops->link_state;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -1665,21 +1747,32 @@ static int xgene_enet_probe(struct platform_device *pdev)
ret = xgene_enet_mdio_config(pdata);
else
INIT_DELAYED_WORK(&pdata->link_work, link_state);
+
+ if (ret)
+ goto err1;
}
- if (ret)
- goto err;
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
- goto err;
+ goto err2;
}
return 0;
-err_netdev:
- unregister_netdev(ndev);
+err2:
+ /*
+ * If necessary, free_netdev() will call netif_napi_del() and undo
+ * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
+ */
+
+ if (pdata->mdio_driver)
+ xgene_enet_phy_disconnect(pdata);
+ else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
+err1:
+ xgene_enet_delete_desc_rings(pdata);
err:
free_netdev(ndev);
return ret;
@@ -1688,11 +1781,9 @@ err:
static int xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
- const struct xgene_mac_ops *mac_ops;
struct net_device *ndev;
pdata = platform_get_drvdata(pdev);
- mac_ops = pdata->mac_ops;
ndev = pdata->ndev;
rtnl_lock();
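xgene_enet_setup_mss() above is a small refcounted-slot allocator over the four hardware MSS registers. Stripped of driver specifics, the pattern is (a sketch under the same locking assumption, i.e. the caller serializes access):

    /* Sketch: reuse a slot already holding the wanted value, else claim
     * a slot whose refcount is zero, else report busy. */
    static int claim_slot(u32 *val, u32 *ref, int nslots, u32 want)
    {
            int i;

            for (i = 0; i < nslots; i++) {
                    if (val[i] == want) {
                            ref[i]++;
                            return i;
                    }
            }
            for (i = 0; i < nslots; i++) {
                    if (!ref[i]) {
                            val[i] = want;
                            ref[i]++;
                            return i;
                    }
            }
            return -EBUSY;
    }

The TX completion path drops the reference (mss_refcnt[mss_index]--), and start_xmit maps the -EBUSY case to NETDEV_TX_BUSY so the stack retries the packet.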
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 217546e5714a..0cda58f5a840 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -47,7 +47,7 @@
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
#define MAX_EXP_BUFFS 256
-#define XGENE_ENET_MSS 1448
+#define NUM_MSS_REG 4
#define XGENE_MIN_ENET_FRAME_SIZE 60
#define XGENE_MAX_ENET_IRQ 16
@@ -143,7 +143,7 @@ struct xgene_mac_ops {
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
- void (*set_mss)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
void (*link_state)(struct work_struct *work);
};
@@ -174,7 +174,6 @@ struct xgene_cle_ops {
struct xgene_enet_pdata {
struct net_device *ndev;
struct mii_bus *mdio_bus;
- struct phy_device *phy_dev;
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
@@ -196,6 +195,7 @@ struct xgene_enet_pdata {
void __iomem *mcx_mac_addr;
void __iomem *mcx_mac_csr_addr;
void __iomem *base_addr;
+ void __iomem *pcs_addr;
void __iomem *ring_csr_addr;
void __iomem *ring_cmd_addr;
int phy_mode;
@@ -212,10 +212,13 @@ struct xgene_enet_pdata {
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
- u32 mss;
+ u32 mss[NUM_MSS_REG];
+ u32 mss_refcnt[NUM_MSS_REG];
+	spinlock_t mss_lock; /* protects mss[] and mss_refcnt[] */
u8 tx_delay;
u8 rx_delay;
bool mdio_driver;
+ struct gpio_desc *sfp_rdy;
};
struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 9c6ad0dce00f..6475f383ba83 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -18,6 +18,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
@@ -84,6 +86,21 @@ static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
wr_addr);
}
+static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
+ u32 wr_addr, u32 wr_data)
+{
+ void __iomem *addr, *wr, *cmd, *cmd_done;
+
+ addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
+ wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
+ cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
+ cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
+
+ if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
+ netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
+ wr_addr);
+}
+
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 *val)
{
@@ -122,6 +139,7 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -137,6 +155,25 @@ static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
rd_addr);
}
+static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
+ u32 rd_addr, u32 *rd_data)
+{
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+ bool success;
+
+ addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
+ rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
+ cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
+ cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
+
+ success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
+ if (!success)
+ netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
+ rd_addr);
+
+ return success;
+}
+
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
struct net_device *ndev = pdata->ndev;
@@ -171,6 +208,17 @@ static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}
+static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
+ return;
+
+ xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
+ xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
+}
+
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
u32 addr0, addr1;
@@ -184,9 +232,22 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
-static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
+ u16 mss, u8 index)
{
- xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+ u8 offset;
+ u32 data;
+
+ offset = (index < 2) ? 0 : 4;
+ xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);
+
+ if (!(index & 0x1))
+ data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
+ SET_VAL(TSO_MSS0, mss);
+ else
+ data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);
+
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
@@ -210,18 +271,17 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
xgene_xgmac_set_mac_addr(pdata);
- xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
- xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
data |= BIT(12);
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
+ xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
+ xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
}
static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
@@ -359,14 +419,17 @@ static void xgene_enet_link_state(struct work_struct *work)
{
struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
struct xgene_enet_pdata, link_work);
+ struct gpio_desc *sfp_rdy = pdata->sfp_rdy;
struct net_device *ndev = pdata->ndev;
u32 link_status, poll_interval;
link_status = xgene_enet_link_status(pdata);
+ if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy))
+ link_status = 0;
+
if (link_status) {
if (!netif_carrier_ok(ndev)) {
netif_carrier_on(ndev);
- xgene_xgmac_init(pdata);
xgene_xgmac_rx_enable(pdata);
xgene_xgmac_tx_enable(pdata);
netdev_info(ndev, "Link is Up - 10Gbps\n");
@@ -380,6 +443,8 @@ static void xgene_enet_link_state(struct work_struct *work)
netdev_info(ndev, "Link is Down\n");
}
poll_interval = PHY_POLL_LINK_OFF;
+
+ xgene_pcs_reset(pdata);
}
schedule_delayed_work(&pdata->link_work, poll_interval);
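xgene_xgmac_set_mss() packs two 14-bit MSS values into each 32-bit TSIF register: indices 0/1 share XG_TSIF_MSS_REG0_ADDR and indices 2/3 the register at +4, and an update must preserve the other half. The merge, as a plain sketch of the field math (layout from the TSO_MSS* defines above):

    /* Sketch: fold a new 14-bit MSS into one half of the paired
     * register, keeping the other half intact. */
    static u32 merge_mss(u32 old, u16 mss, int index)
    {
            u32 lo = old & 0x3fff;           /* TSO_MSS0: bits 0-13  */
            u32 hi = (old >> 16) & 0x3fff;   /* TSO_MSS1: bits 16-29 */

            if (index & 1)
                    hi = mss & 0x3fff;
            else
                    lo = mss & 0x3fff;

            return (hi << 16) | lo;
    }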
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index f1ea485f916b..360ccbd95566 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -24,6 +24,7 @@
#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+#define BLOCK_PCS_OFFSET 0x3800
#define XGENET_CONFIG_REG_ADDR 0x20
#define XGENET_SRST_ADDR 0x00
@@ -72,6 +73,9 @@
#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0
#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8
+#define PCS_CONTROL_1 0x0000
+#define PCS_CTRL_PCS_RST BIT(15)
+
extern const struct xgene_mac_ops xgene_xgmac_ops;
extern const struct xgene_port_ops xgene_xgport_ops;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 058460bdd5a6..a22403c688c9 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -104,7 +104,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
* @bus: points to the mii_bus structure
* Description: reset the MII bus
*/
-int arc_mdio_reset(struct mii_bus *bus)
+static int arc_mdio_reset(struct mii_bus *bus)
{
struct arc_emac_priv *priv = bus->priv;
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..6cac919272ea 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -76,11 +76,19 @@ enum alx_device_quirks {
ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
};
+#define ALX_FLAG_USING_MSIX BIT(0)
+#define ALX_FLAG_USING_MSI BIT(1)
+
struct alx_priv {
struct net_device *dev;
struct alx_hw hw;
+ /* msi-x vectors */
+ int num_vec;
+ struct msix_entry *msix_entries;
+ char irq_lbl[IFNAMSIZ + 8];
+
/* all descriptor memory */
struct {
dma_addr_t dma;
@@ -105,7 +113,7 @@ struct alx_priv {
u16 msg_enable;
- bool msi;
+ int flags;
/* protects hw.stats */
spinlock_t stats_lock;
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1fe35e453d43..6ac40b0003a3 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1031,6 +1031,20 @@ void alx_configure_basic(struct alx_hw *hw)
alx_write_mem32(hw, ALX_WRR, val);
}
+void alx_mask_msix(struct alx_hw *hw, int index, bool mask)
+{
+ u32 reg, val;
+
+ reg = ALX_MSIX_ENTRY_BASE + index * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ val = mask ? PCI_MSIX_ENTRY_CTRL_MASKBIT : 0;
+
+ alx_write_mem32(hw, reg, val);
+ alx_post_write(hw);
+}
+
bool alx_get_phy_info(struct alx_hw *hw)
{
u16 devs1, devs2;
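alx_mask_msix() above works because the chip exposes its MSI-X table in BAR space at ALX_MSIX_ENTRY_BASE, so the driver can poke the standard per-vector mask bit directly: each table entry is 16 bytes (PCI_MSIX_ENTRY_SIZE) and the Vector Control dword sits at offset 12 (PCI_MSIX_ENTRY_VECTOR_CTRL). The address arithmetic, spelled out (constants are the standard ones from pci_regs.h):

    /* Sketch: byte offset of the Vector Control dword of MSI-X table
     * entry <index>, for a table starting at <table_base>. */
    static u32 msix_vector_ctrl(u32 table_base, int index)
    {
            return table_base +
                   index * 16 +  /* PCI_MSIX_ENTRY_SIZE */
                   12;           /* PCI_MSIX_ENTRY_VECTOR_CTRL */
    }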
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index f289c05f5cb4..0191477ace51 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -562,6 +562,7 @@ int alx_reset_mac(struct alx_hw *hw);
void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
+void alx_mask_msix(struct alx_hw *hw, int index, bool mask);
void alx_disable_rss(struct alx_hw *hw);
bool alx_get_phy_info(struct alx_hw *hw);
void alx_update_hw_stats(struct alx_hw *hw);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4eb17daefc4f..c0f84b73574d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -51,6 +51,9 @@
const char alx_drv_name[] = "alx";
+static bool msix = false;
+module_param(msix, bool, 0);
+MODULE_PARM_DESC(msix, "Enable MSI-X interrupt support");
static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
@@ -292,32 +295,29 @@ static int alx_poll(struct napi_struct *napi, int budget)
napi_complete(&alx->napi);
/* enable interrupt */
- spin_lock_irqsave(&alx->irq_lock, flags);
- alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
- spin_unlock_irqrestore(&alx->irq_lock, flags);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_mask_msix(hw, 1, false);
+ } else {
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+ }
alx_post_write(hw);
return work;
}
-static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
struct alx_hw *hw = &alx->hw;
- bool write_int_mask = false;
-
- spin_lock(&alx->irq_lock);
-
- /* ACK interrupt */
- alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
- intr &= alx->int_mask;
if (intr & ALX_ISR_FATAL) {
netif_warn(alx, hw, alx->dev,
"fatal interrupt 0x%x, resetting\n", intr);
alx_schedule_reset(alx);
- goto out;
+ return true;
}
if (intr & ALX_ISR_ALERT)
@@ -329,19 +329,32 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
* is cleared, the interrupt status could be cleared.
*/
alx->int_mask &= ~ALX_ISR_PHY;
- write_int_mask = true;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_schedule_link_check(alx);
}
+ return false;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ spin_lock(&alx->irq_lock);
+
+ /* ACK interrupt */
+ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+ intr &= alx->int_mask;
+
+ if (alx_intr_handle_misc(alx, intr))
+ goto out;
+
if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
napi_schedule(&alx->napi);
/* mask rx/tx interrupt, enable them when napi complete */
alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
- write_int_mask = true;
- }
-
- if (write_int_mask)
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ }
alx_write_mem32(hw, ALX_ISR, 0);
@@ -350,6 +363,46 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
return IRQ_HANDLED;
}
+static irqreturn_t alx_intr_msix_ring(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+
+ /* mask interrupt to ACK chip */
+ alx_mask_msix(hw, 1, true);
+ /* clear interrupt status */
+ alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));
+
+ napi_schedule(&alx->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msix_misc(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+ u32 intr;
+
+ /* mask interrupt to ACK chip */
+ alx_mask_msix(hw, 0, true);
+
+ /* read interrupt status */
+ intr = alx_read_mem32(hw, ALX_ISR);
+ intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);
+
+ if (alx_intr_handle_misc(alx, intr))
+ return IRQ_HANDLED;
+
+ /* clear interrupt status */
+ alx_write_mem32(hw, ALX_ISR, intr);
+
+ /* enable interrupt again */
+ alx_mask_msix(hw, 0, false);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t alx_intr_msi(int irq, void *data)
{
struct alx_priv *alx = data;
@@ -614,31 +667,136 @@ static void alx_free_rings(struct alx_priv *alx)
static void alx_config_vector_mapping(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ u32 tbl = 0;
- alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
+ tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
+ }
+
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
+static bool alx_enable_msix(struct alx_priv *alx)
+{
+ int i, err, num_vec = 2;
+
+ alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!alx->msix_entries) {
+ netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
+ return false;
+ }
+
+ for (i = 0; i < num_vec; i++)
+ alx->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
+ if (err) {
+ kfree(alx->msix_entries);
+ netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
+ return false;
+ }
+
+ alx->num_vec = num_vec;
+ return true;
+}
+
+static int alx_request_msix(struct alx_priv *alx)
+{
+ struct net_device *netdev = alx->dev;
+ int i, err, vector = 0, free_vector = 0;
+
+ err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
+ 0, netdev->name, alx);
+ if (err)
+ goto out_err;
+
+ vector++;
+ sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);
+
+ err = request_irq(alx->msix_entries[vector].vector,
+ alx_intr_msix_ring, 0, alx->irq_lbl, alx);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ free_irq(alx->msix_entries[free_vector++].vector, alx);
+
+ vector--;
+ for (i = 0; i < vector; i++)
+ free_irq(alx->msix_entries[free_vector++].vector, alx);
+
+out_err:
+ return err;
+}
+
+static void alx_init_intr(struct alx_priv *alx, bool msix)
+{
+ if (msix) {
+ if (alx_enable_msix(alx))
+ alx->flags |= ALX_FLAG_USING_MSIX;
+ }
+
+ if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
+ alx->num_vec = 1;
+
+ if (!pci_enable_msi(alx->hw.pdev))
+ alx->flags |= ALX_FLAG_USING_MSI;
+ }
+}
+
+static void alx_disable_advanced_intr(struct alx_priv *alx)
+{
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ kfree(alx->msix_entries);
+ pci_disable_msix(alx->hw.pdev);
+ alx->flags &= ~ALX_FLAG_USING_MSIX;
+ }
+
+ if (alx->flags & ALX_FLAG_USING_MSI) {
+ pci_disable_msi(alx->hw.pdev);
+ alx->flags &= ~ALX_FLAG_USING_MSI;
+ }
+}
+
static void alx_irq_enable(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ int i;
/* level-1 interrupt switch */
alx_write_mem32(hw, ALX_ISR, 0);
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_post_write(hw);
+
+ if (alx->flags & ALX_FLAG_USING_MSIX)
+ /* enable all msix irqs */
+ for (i = 0; i < alx->num_vec; i++)
+ alx_mask_msix(hw, i, false);
}
static void alx_irq_disable(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ int i;
alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
alx_write_mem32(hw, ALX_IMR, 0);
alx_post_write(hw);
- synchronize_irq(alx->hw.pdev->irq);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ for (i = 0; i < alx->num_vec; i++) {
+ alx_mask_msix(hw, i, true);
+ synchronize_irq(alx->msix_entries[i].vector);
+ }
+ } else {
+ synchronize_irq(alx->hw.pdev->irq);
+ }
}
static int alx_request_irq(struct alx_priv *alx)
@@ -650,9 +808,18 @@ static int alx_request_irq(struct alx_priv *alx)
msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
- if (!pci_enable_msi(alx->hw.pdev)) {
- alx->msi = true;
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
+ err = alx_request_msix(alx);
+ if (!err)
+ goto out;
+
+ /* msix request failed, realloc resources */
+ alx_disable_advanced_intr(alx);
+ alx_init_intr(alx, false);
+ }
+ if (alx->flags & ALX_FLAG_USING_MSI) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
msi_ctrl | ALX_MSI_MASK_SEL_LINE);
err = request_irq(pdev->irq, alx_intr_msi, 0,
@@ -660,6 +827,7 @@ static int alx_request_irq(struct alx_priv *alx)
if (!err)
goto out;
/* fall back to legacy interrupt */
+ alx->flags &= ~ALX_FLAG_USING_MSI;
pci_disable_msi(alx->hw.pdev);
}
@@ -669,19 +837,25 @@ static int alx_request_irq(struct alx_priv *alx)
out:
if (!err)
alx_config_vector_mapping(alx);
+ else
+ netdev_err(alx->dev, "IRQ registration failed!\n");
return err;
}
static void alx_free_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
+ int i;
- free_irq(pdev->irq, alx);
-
- if (alx->msi) {
- pci_disable_msi(alx->hw.pdev);
- alx->msi = false;
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+		/* we have only 2 vectors without multi-queue support */
+ for (i = 0; i < 2; i++)
+ free_irq(alx->msix_entries[i].vector, alx);
+ } else {
+ free_irq(pdev->irq, alx);
}
+
+ alx_disable_advanced_intr(alx);
}
static int alx_identify_hw(struct alx_priv *alx)
@@ -847,12 +1021,14 @@ static int __alx_open(struct alx_priv *alx, bool resume)
{
int err;
+ alx_init_intr(alx, msix);
+
if (!resume)
netif_carrier_off(alx->dev);
err = alx_alloc_rings(alx);
if (err)
- return err;
+ goto out_disable_adv_intr;
alx_configure(alx);
@@ -873,6 +1049,8 @@ static int __alx_open(struct alx_priv *alx, bool resume)
out_free_rings:
alx_free_rings(alx);
+out_disable_adv_intr:
+ alx_disable_advanced_intr(alx);
return err;
}
@@ -993,6 +1171,18 @@ static void alx_reset(struct work_struct *work)
rtnl_unlock();
}
+static int alx_tpd_req(struct sk_buff *skb)
+{
+ int num;
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ /* we need one extra descriptor for LSOv2 */
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ num++;
+
+ return num;
+}
+
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
u8 cso, css;
@@ -1012,6 +1202,45 @@ static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
return 0;
}
+static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
+{
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ first->word1 |= 1 << TPD_IPV4_SHIFT;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ /* LSOv2: the first TPD only provides the packet length */
+ first->adrl.l.pkt_len = skb->len;
+ first->word1 |= 1 << TPD_LSO_V2_SHIFT;
+ }
+
+ first->word1 |= 1 << TPD_LSO_EN_SHIFT;
+ first->word1 |= (skb_transport_offset(skb) &
+ TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
+ first->word1 |= (skb_shinfo(skb)->gso_size &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ return 1;
+}
+
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
struct alx_tx_queue *txq = &alx->txq;
@@ -1022,6 +1251,16 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
first_tpd = &txq->tpd[txq->write_idx];
tpd = first_tpd;
+ if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+
+ tpd = &txq->tpd[txq->write_idx];
+ tpd->len = first_tpd->len;
+ tpd->vlan_tag = first_tpd->vlan_tag;
+ tpd->word1 = first_tpd->word1;
+ }
+
maplen = skb_headlen(skb);
dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
DMA_TO_DEVICE);
@@ -1082,9 +1321,9 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
struct alx_priv *alx = netdev_priv(netdev);
struct alx_tx_queue *txq = &alx->txq;
struct alx_txd *first;
- int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+ int tso;
- if (alx_tpd_avail(alx) < tpdreq) {
+ if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
netif_stop_queue(alx->dev);
goto drop;
}
@@ -1092,7 +1331,10 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
first = &txq->tpd[txq->write_idx];
memset(first, 0, sizeof(*first));
- if (alx_tx_csum(skb, first))
+ tso = alx_tso(skb, first);
+ if (tso < 0)
+ goto drop;
+ else if (!tso && alx_tx_csum(skb, first))
goto drop;
if (alx_map_tx_skb(alx, skb) < 0)
@@ -1172,7 +1414,10 @@ static void alx_poll_controller(struct net_device *netdev)
{
struct alx_priv *alx = netdev_priv(netdev);
- if (alx->msi)
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_intr_msix_misc(0, alx);
+ alx_intr_msix_ring(0, alx);
+ } else if (alx->flags & ALX_FLAG_USING_MSI)
alx_intr_msi(0, alx);
else
alx_intr_legacy(0, alx);
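Taken together, alx_init_intr() and alx_request_irq() above implement a three-step fallback -- MSI-X (two vectors: misc + one TxRx ring) when the new msix module parameter is set, then MSI, then legacy INTx. Condensed into a sketch (illustrative; retransmission-timer and vector-mapping setup elided):

    static void example_setup_irq(struct alx_priv *alx, bool want_msix)
    {
            if (want_msix && alx_enable_msix(alx)) {
                    alx->flags |= ALX_FLAG_USING_MSIX;
                    return;
            }
            alx->num_vec = 1;
            if (!pci_enable_msi(alx->hw.pdev))
                    alx->flags |= ALX_FLAG_USING_MSI;
            /* else: legacy INTx on alx->hw.pdev->irq */
    }

MSI-X is opt-in here (modprobe alx msix=1); the default behavior is unchanged.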
@@ -1351,7 +1596,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
dev_warn(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74f0a37c4eb6..17aa33c5567d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1486,7 +1486,7 @@ static int b44_open(struct net_device *dev)
b44_enable_ints(bp);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_start(bp->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
out:
@@ -1651,7 +1651,7 @@ static int b44_close(struct net_device *dev)
netif_stop_queue(dev);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_stop(bp->phydev);
+ phy_stop(dev->phydev);
napi_disable(&bp->napi);
@@ -1832,90 +1832,100 @@ static int b44_nway_reset(struct net_device *dev)
return r;
}
-static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ u32 supported, advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- return phy_ethtool_gset(bp->phydev, cmd);
+ BUG_ON(!dev->phydev);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_MII);
+ supported = (SUPPORTED_Autoneg);
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
- cmd->advertising = 0;
+ advertising = 0;
if (bp->flags & B44_FLAG_ADV_10HALF)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (bp->flags & B44_FLAG_ADV_10FULL)
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (bp->flags & B44_FLAG_ADV_100HALF)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (bp->flags & B44_FLAG_ADV_100FULL)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
- SPEED_100 : SPEED_10));
- cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+ advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = 0;
- cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
- cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+ cmd->base.port = 0;
+ cmd->base.phy_address = bp->phy_addr;
+ cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
- if (cmd->autoneg == AUTONEG_ENABLE)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ advertising |= ADVERTISED_Autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (!netif_running(dev)){
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+
return 0;
}
-static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
u32 speed;
int ret;
+ u32 advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
+ BUG_ON(!dev->phydev);
spin_lock_irq(&bp->lock);
if (netif_running(dev))
b44_setup_phy(bp);
- ret = phy_ethtool_sset(bp->phydev, cmd);
+ ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
spin_unlock_irq(&bp->lock);
return ret;
}
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* We do not support gigabit. */
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (cmd->advertising &
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
return -EINVAL;
} else if ((speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)) {
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)) {
return -EINVAL;
}
spin_lock_irq(&bp->lock);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
bp->flags &= ~(B44_FLAG_FORCE_LINK |
B44_FLAG_100_BASE_T |
B44_FLAG_FULL_DUPLEX |
@@ -1923,19 +1933,19 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
- if (cmd->advertising == 0) {
+ if (advertising == 0) {
bp->flags |= (B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
} else {
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
bp->flags |= B44_FLAG_ADV_10HALF;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->flags |= B44_FLAG_ADV_10FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
bp->flags |= B44_FLAG_ADV_100HALF;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->flags |= B44_FLAG_ADV_100FULL;
}
} else {
@@ -1943,7 +1953,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
if (speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
@@ -2110,8 +2120,6 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
- .get_settings = b44_get_settings,
- .set_settings = b44_set_settings,
.nway_reset = b44_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = b44_get_wol,
@@ -2125,6 +2133,8 @@ static const struct ethtool_ops b44_ethtool_ops = {
.get_strings = b44_get_strings,
.get_sset_count = b44_get_sset_count,
.get_ethtool_stats = b44_get_ethtool_stats,
+ .get_link_ksettings = b44_get_link_ksettings,
+ .set_link_ksettings = b44_set_link_ksettings,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2137,8 +2147,8 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_lock_irq(&bp->lock);
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ BUG_ON(!dev->phydev);
+ err = phy_mii_ioctl(dev->phydev, ifr, cmd);
} else {
err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
}
@@ -2206,7 +2216,7 @@ static const struct net_device_ops b44_netdev_ops = {
static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phydev;
+ struct phy_device *phydev = dev->phydev;
bool status_changed = 0;
BUG_ON(!phydev);
@@ -2303,7 +2313,6 @@ static int b44_register_phy_one(struct b44 *bp)
SUPPORTED_MII);
phydev->advertising = phydev->supported;
- bp->phydev = phydev;
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
@@ -2323,9 +2332,10 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
+ struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
- phy_disconnect(bp->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
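The set path of the conversion also needs the reverse mapping: the requested link-mode bitmap is folded back into a legacy u32 so the existing ADVERTISED_* checks keep working. As a sketch (hypothetical helper distilled from b44_set_link_ksettings() above):

    static bool example_wants_gigabit(const struct ethtool_link_ksettings *cmd)
    {
            u32 advertising;

            ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                    cmd->link_modes.advertising);

            return advertising & (ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);
    }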
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 65d88d7c5581..89d2cf341163 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -404,7 +404,6 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int old_link;
struct mii_if_info mii_if;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6c8bc5fadac7..ae364c74baf3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -791,7 +791,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev)
int status_changed;
priv = netdev_priv(dev);
- phydev = priv->phydev;
+ phydev = dev->phydev;
status_changed = 0;
if (priv->old_link != phydev->link) {
@@ -913,7 +913,6 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
- priv->phydev = phydev;
}
/* mask all interrupts and request them */
@@ -1085,7 +1084,7 @@ static int bcm_enet_open(struct net_device *dev)
ENETDMAC_IRMASK, priv->tx_chan);
if (priv->has_phy)
- phy_start(priv->phydev);
+ phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1127,7 +1126,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1190,7 +1189,7 @@ static int bcm_enet_stop(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
del_timer_sync(&priv->rx_timeout);
/* mask all interrupts */
@@ -1234,10 +1233,8 @@ static int bcm_enet_stop(struct net_device *dev)
free_irq(dev->irq, dev);
/* release phy */
- if (priv->has_phy) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
+ if (priv->has_phy)
+ phy_disconnect(dev->phydev);
return 0;
}
@@ -1437,64 +1434,68 @@ static int bcm_enet_nway_reset(struct net_device *dev)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(dev->phydev);
}
return -EOPNOTSUPP;
}
-static int bcm_enet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
+ u32 supported, advertising;
priv = netdev_priv(dev);
- cmd->maxrxpkt = 0;
- cmd->maxtxpkt = 0;
-
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_gset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
} else {
- cmd->autoneg = 0;
- ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
- ? SPEED_100 : SPEED_10));
- cmd->duplex = (priv->force_duplex_full) ?
+ cmd->base.autoneg = 0;
+ cmd->base.speed = (priv->force_speed_100) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->supported = ADVERTISED_10baseT_Half |
+ supported = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->advertising = 0;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
+ advertising = 0;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
+ cmd->base.port = PORT_MII;
}
return 0;
}
-static int bcm_enet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_sset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(dev->phydev, cmd);
} else {
- if (cmd->autoneg ||
- (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
- cmd->port != PORT_MII)
+ if (cmd->base.autoneg ||
+ (cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ cmd->base.port != PORT_MII)
return -EINVAL;
- priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
- priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+ priv->force_speed_100 =
+ (cmd->base.speed == SPEED_100) ? 1 : 0;
+ priv->force_duplex_full =
+ (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
if (netif_running(dev))
bcm_enet_adjust_link(dev);
@@ -1588,14 +1589,14 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.nway_reset = bcm_enet_nway_reset,
- .get_settings = bcm_enet_get_settings,
- .set_settings = bcm_enet_set_settings,
.get_drvinfo = bcm_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = bcm_enet_get_ringparam,
.set_ringparam = bcm_enet_set_ringparam,
.get_pauseparam = bcm_enet_get_pauseparam,
.set_pauseparam = bcm_enet_set_pauseparam,
+ .get_link_ksettings = bcm_enet_get_link_ksettings,
+ .set_link_ksettings = bcm_enet_set_link_ksettings,
};
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1604,9 +1605,9 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
} else {
struct mii_if_info mii;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index f55af4310085..0a1b7b2e55bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -290,7 +290,6 @@ struct bcm_enet_priv {
/* used when a phy is connected (phylib used) */
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_link;
int old_duplex;
int old_pause;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b2d30863caeb..c3354b9941d1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -58,8 +58,8 @@ BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
u32 mask) \
{ \
- intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
priv->irq##which##_mask &= ~(mask); \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
u32 mask) \
@@ -1692,7 +1692,7 @@ static int bcm_sysport_stop(struct net_device *dev)
return 0;
}
-static struct ethtool_ops bcm_sysport_ethtool_ops = {
+static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
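The two-line swap in intrl2_*_mask_clear() above is an ordering fix: the cached mask must be updated before the hardware unmask is written, otherwise an interrupt firing right after the register write can run the handler against a stale soft copy. In pattern form (a sketch, not the macro body):

    static void example_unmask(struct bcm_sysport_priv *priv, u32 mask)
    {
            priv->irq0_mask &= ~mask;       /* 1: soft state first   */
            intrl2_0_writel(priv, mask,     /* 2: then unmask the hw */
                            INTRL2_CPU_MASK_CLEAR);
    }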
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 625235db644f..c16ec3a51876 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -92,6 +92,7 @@ MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
@@ -157,7 +158,8 @@ static int bgmac_probe(struct bcma_device *core)
dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr,
bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
- if (!bgmac_is_bcm4707_family(core)) {
+ if (!bgmac_is_bcm4707_family(core) &&
+ !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
@@ -230,6 +232,21 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
break;
+ case BCMA_CHIP_ID_BCM53573:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (core->core_unit == 0) {
+ bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |=
+ BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
+ } else if (core->core_unit == 1) {
+ bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
+ bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
+ }
+ break;
default:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index c4751ece76f6..6ea0e5ff1e44 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -932,7 +932,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
et_swtype <<= 4;
sw_type = et_swtype;
} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
- sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
+ sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
+ BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
@@ -940,6 +941,27 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
BGMAC_CHIPCTL_1_SW_TYPE_MASK),
sw_type);
+ } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
+ u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
+ BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
+ u8 et_swtype = 0;
+ char buf[4];
+
+ if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
+ if (kstrtou8(buf, 0, &et_swtype))
+ dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
+ buf);
+ sw_type = (et_swtype & 0x0f) << 12;
+ } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
+ sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
+ BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
+ }
+ bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_4_SW_TYPE_MASK),
+ sw_type);
+ } else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
+ bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
+ BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
}
if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
@@ -1467,6 +1489,10 @@ int bgmac_enet_probe(struct bgmac *info)
*/
bgmac_clk_enable(bgmac, 0);
+	/* This seems to fix the IRQ by assigning OOB line #6 to the core */
+ if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+ bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+
bgmac_chip_reset(bgmac);
err = bgmac_dma_alloc(bgmac);
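On the new BCM53573 path, chipctl register 4 mirrors the register-1 layout: IF_TYPE occupies bits 12-13 and SW_TYPE bits 14-15, which is why the nvram override lands a single nibble across both fields at once. Spelled out (values from the BGMAC_CHIPCTL_4_* defines below):

    /* Sketch: et_swtype from nvram covers IF_TYPE (bits 12-13) and
     * SW_TYPE (bits 14-15) of chipctl reg 4 in one shifted nibble. */
    static u32 cc4_type_from_nvram(u8 et_swtype)
    {
            return (u32)(et_swtype & 0x0f) << 12;
    }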
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 24a250267b88..80836b4c9f38 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -369,6 +369,21 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
+#define BGMAC_CHIPCTL_4_IF_TYPE_MASK 0x00003000
+#define BGMAC_CHIPCTL_4_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_4_IF_TYPE_MII 0x00001000
+#define BGMAC_CHIPCTL_4_IF_TYPE_RGMII 0x00002000
+#define BGMAC_CHIPCTL_4_SW_TYPE_MASK 0x0000C000
+#define BGMAC_CHIPCTL_4_SW_TYPE_EPHY 0x00000000
+#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYMII 0x00004000
+#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYRMII 0x00008000
+#define BGMAC_CHIPCTL_4_SW_TYPE_RGMII 0x0000C000
+
+#define BGMAC_CHIPCTL_7_IF_TYPE_MASK 0x000000C0
+#define BGMAC_CHIPCTL_7_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040
+#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080
+
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
@@ -390,6 +405,10 @@
#define BGMAC_FEAT_NO_CLR_MIB BIT(13)
#define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14)
#define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15)
+#define BGMAC_FEAT_IRQ_ID_OOB_6 BIT(16)
+#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17)
+#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
+#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
struct bgmac_slot_info {
union {
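
These CHIPCTL_4 values also show why the NVRAM override in bgmac_chip_reset() is written as (et_swtype & 0x0f) << 12: IF_TYPE occupies bits 12-13 and SW_TYPE bits 14-15, so a single 4-bit et_swtype value spans both fields at once. A small illustrative check (hedged sketch, not driver code):

	/* The combined CC4 field is bits 12-15; a 4-bit NVRAM value shifted
	 * by 12 programs IF_TYPE and SW_TYPE together.
	 */
	static u32 cc4_sw_type_from_nvram(u8 et_swtype)
	{
		BUILD_BUG_ON((BGMAC_CHIPCTL_4_IF_TYPE_MASK |
			      BGMAC_CHIPCTL_4_SW_TYPE_MASK) != 0x0000f000);
		return (u32)(et_swtype & 0x0f) << 12;
	}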
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 505ceaf451e2..27f11a5d5fe2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -50,7 +50,7 @@
#include <linux/log2.h>
#include <linux/aer.h>
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e68fadecfdb..243cb9748d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -492,7 +492,8 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
-int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fa3386bb14f7..20fe6a8c35c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12563,41 +12563,64 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
+struct bnx2x_mcast_list_elem_group
+{
+ struct list_head mcast_group_link;
+ struct bnx2x_mcast_list_elem mcast_elems[];
+};
+
+#define MCAST_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
+ sizeof(struct bnx2x_mcast_list_elem))
+
+static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_list_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_list_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p)
+ struct bnx2x_mcast_ramrod_params *p,
+ struct list_head *mcast_group_list)
{
- int mc_count = netdev_mc_count(bp->dev);
- struct bnx2x_mcast_list_elem *mc_mac =
- kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
+ struct bnx2x_mcast_list_elem *mc_mac;
struct netdev_hw_addr *ha;
-
- if (!mc_mac)
- return -ENOMEM;
+ struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
+ int mc_count = netdev_mc_count(bp->dev);
+ int offset = 0;
INIT_LIST_HEAD(&p->mcast_list);
-
netdev_for_each_mc_addr(ha, bp->dev) {
+ if (!offset) {
+ current_mcast_group =
+ (struct bnx2x_mcast_list_elem_group *)
+ __get_free_page(GFP_ATOMIC);
+ if (!current_mcast_group) {
+ bnx2x_free_mcast_macs_list(mcast_group_list);
+ BNX2X_ERR("Failed to allocate mc MAC list\n");
+ return -ENOMEM;
+ }
+ list_add(&current_mcast_group->mcast_group_link,
+ mcast_group_list);
+ }
+ mc_mac = &current_mcast_group->mcast_elems[offset];
mc_mac->mac = bnx2x_mc_addr(ha);
list_add_tail(&mc_mac->link, &p->mcast_list);
- mc_mac++;
+ offset++;
+ if (offset == MCAST_ELEMS_PER_PG)
+ offset = 0;
}
-
p->mcast_list_len = mc_count;
-
return 0;
}
-static void bnx2x_free_mcast_macs_list(
- struct bnx2x_mcast_ramrod_params *p)
-{
- struct bnx2x_mcast_list_elem *mc_mac =
- list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
- link);
-
- WARN_ON(!mc_mac);
- kfree(mc_mac);
-}
-
/**
* bnx2x_set_uc_list - configure a new unicast MACs list.
*
@@ -12643,8 +12666,9 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
BNX2X_UC_LIST_MAC, &ramrod_flags);
}
-static int bnx2x_set_mc_list(struct bnx2x *bp)
+static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
{
+ LIST_HEAD(mcast_group_list);
struct net_device *dev = bp->dev;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
int rc = 0;
@@ -12660,12 +12684,9 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
/* then, configure a new MACs list */
if (netdev_mc_count(dev)) {
- rc = bnx2x_init_mcast_macs_list(bp, &rparam);
- if (rc) {
- BNX2X_ERR("Failed to create multicast MACs list: %d\n",
- rc);
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
+ if (rc)
return rc;
- }
/* Now add the new MACs */
rc = bnx2x_config_mcast(bp, &rparam,
@@ -12674,7 +12695,44 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
rc);
- bnx2x_free_mcast_macs_list(&rparam);
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
+ }
+
+ return rc;
+}
+
+static int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ LIST_HEAD(mcast_group_list);
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ struct net_device *dev = bp->dev;
+ int rc = 0;
+
+ /* On older adapters, we need to flush and re-add filters */
+ if (CHIP_IS_E1x(bp))
+ return bnx2x_set_mc_list_e1x(bp);
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
+ if (rc)
+ return rc;
+
+ /* Override the currently configured set of mc filters */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_SET);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+ rc);
+
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
+ } else {
+ /* If no mc addresses are required, flush the configuration */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc)
+ BNX2X_ERR("Failed to clear multicast configuration %d\n",
+ rc);
}
return rc;
@@ -13214,13 +13272,22 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
- dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_IPXIP4;
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+
+ dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index ff702a707a91..cea6bdcde33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2600,8 +2600,29 @@ struct bnx2x_mcast_mac_elem {
u8 pad[2]; /* For a natural alignment of the following buffer */
};
+struct bnx2x_mcast_bin_elem {
+ struct list_head link;
+ int bin;
+ int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */
+};
+
+union bnx2x_mcast_elem {
+ struct bnx2x_mcast_bin_elem bin_elem;
+ struct bnx2x_mcast_mac_elem mac_elem;
+};
+
+struct bnx2x_mcast_elem_group {
+ struct list_head mcast_group_link;
+ union bnx2x_mcast_elem mcast_elems[];
+};
+
+#define MCAST_MAC_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \
+ sizeof(union bnx2x_mcast_elem))
+
struct bnx2x_pending_mcast_cmd {
struct list_head link;
+ struct list_head group_head;
int type; /* BNX2X_MCAST_CMD_X */
union {
struct list_head macs_head;
@@ -2609,6 +2630,11 @@ struct bnx2x_pending_mcast_cmd {
int next_bin; /* Needed for RESTORE flow with aprox match */
} data;
+ bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set
+ * when macs_head has been converted to a list of
+ * bnx2x_mcast_bin_elem.
+ */
+
bool done; /* set to true, when the command has been handled,
* practically used in 57712 handling only, where one pending
* command may be handled in a few operations. As long as for
@@ -2627,53 +2653,93 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
return 0;
}
+static void bnx2x_free_groups(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
enum bnx2x_mcast_cmd cmd)
{
- int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
- struct bnx2x_mcast_mac_elem *cur_mac = NULL;
struct bnx2x_mcast_list_elem *pos;
- int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
- p->mcast_list_len : 0);
+ struct bnx2x_mcast_elem_group *elem_group;
+ struct bnx2x_mcast_mac_elem *mac_elem;
+ int total_elems = 0, macs_list_len = 0, offset = 0;
+
+ /* When adding MACs we'll need to store their values */
+ if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET)
+ macs_list_len = p->mcast_list_len;
/* If the command is empty ("handle pending commands only"), break */
if (!p->mcast_list_len)
return 0;
- total_sz = sizeof(*new_cmd) +
- macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
-
/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
- new_cmd = kzalloc(total_sz, GFP_ATOMIC);
-
+ new_cmd = kzalloc(sizeof(*new_cmd), GFP_ATOMIC);
if (!new_cmd)
return -ENOMEM;
- DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
- cmd, macs_list_len);
-
INIT_LIST_HEAD(&new_cmd->data.macs_head);
-
+ INIT_LIST_HEAD(&new_cmd->group_head);
new_cmd->type = cmd;
new_cmd->done = false;
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+ cmd, macs_list_len);
+
switch (cmd) {
case BNX2X_MCAST_CMD_ADD:
- cur_mac = (struct bnx2x_mcast_mac_elem *)
- ((u8 *)new_cmd + sizeof(*new_cmd));
-
- /* Push the MACs of the current command into the pending command
- * MACs list: FIFO
+ case BNX2X_MCAST_CMD_SET:
+ /* For a set command, we need to allocate sufficient memory for
+ * all the bins, since we can't analyze at this point how much
+ * memory would be required.
*/
+ total_elems = macs_list_len;
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ if (total_elems < BNX2X_MCAST_BINS_NUM)
+ total_elems = BNX2X_MCAST_BINS_NUM;
+ }
+ while (total_elems > 0) {
+ elem_group = (struct bnx2x_mcast_elem_group *)
+ __get_free_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!elem_group) {
+ bnx2x_free_groups(&new_cmd->group_head);
+ kfree(new_cmd);
+ return -ENOMEM;
+ }
+ total_elems -= MCAST_MAC_ELEMS_PER_PG;
+ list_add_tail(&elem_group->mcast_group_link,
+ &new_cmd->group_head);
+ }
+ elem_group = list_first_entry(&new_cmd->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
list_for_each_entry(pos, &p->mcast_list, link) {
- memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
- list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
- cur_mac++;
+ mac_elem = &elem_group->mcast_elems[offset].mac_elem;
+ memcpy(mac_elem->mac, pos->mac, ETH_ALEN);
+ /* Push the MACs of the current command into the pending
+ * command MACs list: FIFO
+ */
+ list_add_tail(&mac_elem->link,
+ &new_cmd->data.macs_head);
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
}
-
break;
case BNX2X_MCAST_CMD_DEL:
@@ -2771,7 +2837,8 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
int bin;
- if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) ||
+ (cmd == BNX2X_MCAST_CMD_SET_ADD))
rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
data->rules[idx].cmd_general_data |= rx_tx_add_flag;
@@ -2797,6 +2864,16 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
bin = cfg_data->bin;
break;
+ case BNX2X_MCAST_CMD_SET_ADD:
+ bin = cfg_data->bin;
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case BNX2X_MCAST_CMD_SET_DEL:
+ bin = cfg_data->bin;
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
default:
BNX2X_ERR("Unknown command: %d\n", cmd);
return;
@@ -2932,6 +3009,110 @@ static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
cmd_pos->data.next_bin++;
}
+static void
+bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_pending_mcast_cmd *cmd_pos)
+{
+ u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
+ struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ struct bnx2x_mcast_bin_elem *p_item;
+ struct bnx2x_mcast_elem_group *elem_group;
+ int cnt = 0, mac_cnt = 0, offset = 0, i;
+
+ memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ);
+ memcpy(cur, o->registry.aprox_match.vec,
+ sizeof(u64) * BNX2X_MCAST_VEC_SZ);
+
+ /* Fill `req' with the required set of bins to configure */
+ list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+ link) {
+ int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac);
+
+ DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n",
+ pmac_pos->mac);
+
+ BIT_VEC64_SET_BIT(req, bin);
+ list_del(&pmac_pos->link);
+ mac_cnt++;
+ }
+
+ /* We no longer have use for the MACs; reuse the memory for
+ * a list that will be used to configure bins.
+ */
+ cmd_pos->set_convert = true;
+ INIT_LIST_HEAD(&cmd_pos->data.macs_head);
+ elem_group = list_first_entry(&cmd_pos->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) {
+ bool b_current = !!BIT_VEC64_TEST_BIT(cur, i);
+ bool b_required = !!BIT_VEC64_TEST_BIT(req, i);
+
+ if (b_current == b_required)
+ continue;
+
+ p_item = &elem_group->mcast_elems[offset].bin_elem;
+ p_item->bin = i;
+ p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD
+ : BNX2X_MCAST_CMD_SET_DEL;
+ list_add_tail(&p_item->link , &cmd_pos->data.macs_head);
+ cnt++;
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
+ }
+
+ /* We now definitely know how many commands are hiding here.
+ * Also need to correct the disruption we've added to guarantee this
+ * would be enqueued.
+ */
+ o->total_pending_num -= (o->max_cmd_len + mac_cnt);
+ o->total_pending_num += cnt;
+
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num);
+}
+
+static void
+bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *cnt)
+{
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+ struct bnx2x_mcast_bin_elem *p_item, *p_item_n;
+
+ /* This is actually a 2-part scheme - it starts by converting the MACs
+ * into a list of bins to be added/removed, and correcting the numbers
+ * on the object. This is now allowed, as we are now sure that all
+ * previously configured requests have already been applied.
+ * The second part is actually adding rules for the newly introduced
+ * entries [like all the rest of the hdl_pending functions].
+ */
+ if (!cmd_pos->set_convert)
+ bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos);
+
+ list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head,
+ link) {
+ cfg_data.bin = (u8)p_item->bin;
+ o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type);
+ (*cnt)++;
+
+ list_del(&p_item->link);
+
+ /* Break if we reached the maximum number of rules. */
+ if (*cnt >= o->max_cmd_len)
+ break;
+ }
+
+ /* if no more MACs to configure - we are done */
+ if (list_empty(&cmd_pos->data.macs_head))
+ cmd_pos->done = true;
+}
+
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p)
{
@@ -2955,6 +3136,10 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
&cnt);
break;
+ case BNX2X_MCAST_CMD_SET:
+ bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
+ break;
+
default:
BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
return -EINVAL;
@@ -2965,6 +3150,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
*/
if (cmd_pos->done) {
list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
kfree(cmd_pos);
}
@@ -3095,6 +3281,19 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
o->set_registry_size(o, reg_sz + p->mcast_list_len);
break;
+ case BNX2X_MCAST_CMD_SET:
+ /* We can only learn how many commands would actually be used
+ * when this is being configured. So for now, simply guarantee
+ * the command will be enqueued [to refrain from adding logic
+ * that handles this and THEN learns it needs several ramrods].
+ * Just like for ADD/Cont, the mcast_list_len might be an
+ * overestimation; or even more so, since we don't take into
+ * account the possibility of removal of existing bins.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ o->total_pending_num += o->max_cmd_len;
+ break;
+
default:
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
@@ -3108,12 +3307,16 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_bins)
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
o->set_registry_size(o, old_num_bins);
o->total_pending_num -= p->mcast_list_len;
+
+ if (cmd == BNX2X_MCAST_CMD_SET)
+ o->total_pending_num -= o->max_cmd_len;
}
/**
@@ -3223,9 +3426,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
bnx2x_mcast_refresh_registry_e2(bp, o);
/* If CLEAR_ONLY was requested - don't send a ramrod and clear
- * RAMROD_PENDING status immediately.
+ * RAMROD_PENDING status immediately. Due to the SET option, it's also
+ * possible that after evaluating the differences there's no need for
+ * a ramrod. In that case, we can skip it as well.
*/
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) {
raw->clear_pending(raw);
return 0;
} else {
@@ -3253,6 +3458,11 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
enum bnx2x_mcast_cmd cmd)
{
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ BNX2X_ERR("Can't use `set' command on e1h!\n");
+ return -EINVAL;
+ }
+
/* Mark, that there is a work to do */
if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
p->mcast_list_len = 1;
@@ -3262,7 +3472,8 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_bins)
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd)
{
/* Do nothing */
}
@@ -3372,6 +3583,11 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ BNX2X_ERR("Can't use `set' command on e1!\n");
+ return -EINVAL;
+ }
+
switch (cmd) {
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
@@ -3422,7 +3638,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_macs)
+ int old_num_macs,
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3572,6 +3789,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
}
list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
kfree(cmd_pos);
return cnt;
@@ -3816,7 +4034,7 @@ error_exit2:
r->clear_pending(r);
error_exit1:
- o->revert(bp, p, old_reg_size);
+ o->revert(bp, p, old_reg_size, cmd);
return rc;
}
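
The core of the new SET flow is the convert step: diff the currently configured bin vector against the requested one and turn each difference into a SET_ADD or SET_DEL bin rule. A minimal sketch of that diffing using the same BIT_VEC64 helpers (function name is illustrative):

	/* One rule per bin whose current and requested state differ. */
	static int count_bin_updates(const u64 *cur, const u64 *req)
	{
		int i, cnt = 0;

		for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) {
			bool b_cur = !!BIT_VEC64_TEST_BIT(cur, i);
			bool b_req = !!BIT_VEC64_TEST_BIT(req, i);

			if (b_cur != b_req)
				cnt++;	/* SET_ADD if b_req, else SET_DEL */
		}
		return cnt;
	}

This count is what corrects total_pending_num after the max_cmd_len over-reservation made in bnx2x_mcast_validate_e2().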
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 4048fc594cce..0bf2fd470819 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -536,6 +536,15 @@ enum bnx2x_mcast_cmd {
BNX2X_MCAST_CMD_CONT,
BNX2X_MCAST_CMD_DEL,
BNX2X_MCAST_CMD_RESTORE,
+
+ /* Following this, the multicast configuration should approximately
+ * equal the set of MACs provided [i.e., remove all else].
+ * The two sub-commands are used internally to decide whether a given
+ * bin is to be added or removed.
+ */
+ BNX2X_MCAST_CMD_SET,
+ BNX2X_MCAST_CMD_SET_ADD,
+ BNX2X_MCAST_CMD_SET_DEL,
};
struct bnx2x_mcast_obj {
@@ -635,7 +644,8 @@ struct bnx2x_mcast_obj {
*/
void (*revert)(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_bins);
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd);
int (*get_registry_size)(struct bnx2x_mcast_obj *o);
void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 632daff117d3..3f77d0863543 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -573,17 +573,6 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
}
- /* clear existing mcasts */
- mcast.mcast_list_len = vf->mcast_list_len;
- vf->mcast_list_len = mc_num;
- rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
- if (rc) {
- BNX2X_ERR("Failed to remove multicasts\n");
- kfree(mc);
- return rc;
- }
-
- /* update mcast list on the ramrod params */
if (mc_num) {
INIT_LIST_HEAD(&mcast.mcast_list);
for (i = 0; i < mc_num; i++) {
@@ -594,12 +583,18 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* add new mcasts */
mcast.mcast_list_len = mc_num;
- rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
if (rc)
- BNX2X_ERR("Faled to add multicasts\n");
- kfree(mc);
+ BNX2X_ERR("Faled to set multicasts\n");
+ } else {
+ /* clear existing mcasts */
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc)
+ BNX2X_ERR("Failed to remove multicasts\n");
}
+ kfree(mc);
+
return rc;
}
@@ -1583,7 +1578,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
- vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2527,7 +2521,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
for_each_vf(bp, vfidx) {
bulletin = BP_VF_BULLETIN(bp, vfidx);
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
- bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
+ htons(ETH_P_8021Q));
}
}
@@ -2787,7 +2782,8 @@ static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
return 0;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct pf_vf_bulletin_content *bulletin = NULL;
struct bnx2x *bp = netdev_priv(dev);
@@ -2802,6 +2798,9 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
vfidx, vlan, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 670a581ffabc..7a6d406f4c11 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -195,7 +195,6 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
- int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 228c964e709a..a9f9f3738022 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -32,6 +32,7 @@
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -93,50 +94,49 @@ enum board_idx {
BCM57404_NPAR,
BCM57406_NPAR,
BCM57407_SFP,
+ BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
- BCM57304_VF,
- BCM57404_VF,
- BCM57414_VF,
- BCM57314_VF,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
};
/* indexed by enum above */
static const struct {
char *name;
} board_info[] = {
- { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
- { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
- { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
@@ -160,13 +160,19 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
- { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
- { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
- { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
- { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
{ 0 }
};
@@ -189,8 +195,7 @@ static const u16 bnxt_async_events_arr[] = {
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF ||
- idx == BCM57314_VF || idx == BCM57414_VF);
+ return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -3419,10 +3424,10 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
- vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+ vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
req.hash_type = cpu_to_le32(vnic->hash_type);
@@ -4156,6 +4161,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4187,12 +4197,6 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- if (is_valid_ether_addr(vf->mac_addr))
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- else
- random_ether_addr(bp->dev->dev_addr);
vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -4204,14 +4208,21 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
vf->max_vnics = le16_to_cpu(resp->max_vnics);
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+ return rc;
#endif
}
- bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
- bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
-
hwrm_func_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4249,6 +4260,9 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bp->max_tc = 1;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4307,6 +4321,31 @@ hwrm_ver_get_exit:
return rc;
}
+int bnxt_hwrm_fw_set_time(struct bnxt *bp)
+{
+#if IS_ENABLED(CONFIG_RTC_LIB)
+ struct hwrm_fw_set_time_input req = {0};
+ struct rtc_time tm;
+ struct timeval tv;
+
+ if (bp->hwrm_spec_code < 0x10400)
+ return -EOPNOTSUPP;
+
+ do_gettimeofday(&tv);
+ rtc_time_to_tm(tv.tv_sec, &tm);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
+ req.year = cpu_to_le16(1900 + tm.tm_year);
+ req.month = 1 + tm.tm_mon;
+ req.day = tm.tm_mday;
+ req.hour = tm.tm_hour;
+ req.minute = tm.tm_min;
+ req.second = tm.tm_sec;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
int rc;
@@ -6804,6 +6843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
+ bnxt_hwrm_fw_set_time(bp);
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
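
The reordering in bnxt_hwrm_func_qcaps() above also encodes a locking rule: bnxt_approve_mac() issues its own HWRM request, so it must run only after hwrm_cmd_lock has been dropped. A hedged sketch of the resulting shape (the deadlock rationale is inferred from the lock usage, not stated in the patch):

	mutex_lock(&bp->hwrm_cmd_lock);
	/* ... copy everything needed out of the response buffer ... */
	memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
	mutex_unlock(&bp->hwrm_cmd_lock);

	/* Only now is it safe to send a follow-up HWRM message. */
	if (!is_valid_ether_addr(vf->mac_addr)) {
		random_ether_addr(bp->dev->dev_addr);
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
	}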
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 23e04a6142fb..51b164a0e844 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.3.0"
+#define DRV_MODULE_VERSION "1.5.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 3
+#define DRV_VER_MIN 5
#define DRV_VER_UPD 0
struct tx_bd {
@@ -106,11 +106,11 @@ struct tx_cmp {
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
#define CMP_TYPE_ERROR_STATUS 48
- #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
- #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define TX_CMP_FLAGS_ERROR (1 << 6)
#define TX_CMP_FLAGS_PUSH (1 << 7)
@@ -389,11 +389,6 @@ struct rx_tpa_end_cmp_ext {
#define INVALID_HW_RING_ID ((u16)-1)
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
-
/* The hardware supports certain page sizes. Use the supported page sizes
* to allocate the rings.
*/
@@ -418,7 +413,7 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
-#define BNXT_MIN_PKT_SIZE 45
+#define BNXT_MIN_PKT_SIZE 52
#define BNXT_NUM_TESTS(bp) 0
@@ -1225,6 +1220,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b83e17403d6c..a7e04ff4eaed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -21,6 +21,8 @@
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
@@ -346,7 +348,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
- channel->max_combined = max_rx_rings;
+ channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
@@ -404,8 +406,8 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
max_tx_rings /= tcs;
- if (sh && (channel->combined_count > max_rx_rings ||
- channel->combined_count > max_tx_rings))
+ if (sh &&
+ channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
return -ENOMEM;
if (!sh && (channel->rx_count > max_rx_rings ||
@@ -428,8 +430,10 @@ static int bnxt_set_channels(struct net_device *dev,
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->rx_nr_rings = channel->combined_count;
- bp->tx_nr_rings_per_tc = channel->combined_count;
+ bp->rx_nr_rings = min_t(int, channel->combined_count,
+ max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
+ max_tx_rings);
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count;
@@ -1028,6 +1032,10 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+
static int bnxt_flash_nvram(struct net_device *dev,
u16 dir_type,
u16 dir_ordinal,
@@ -1179,7 +1187,6 @@ static int bnxt_flash_firmware(struct net_device *dev,
(unsigned long)calculated_crc);
return -EINVAL;
}
- /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw_data, fw_size);
if (rc == 0) /* Firmware update successful */
@@ -1188,6 +1195,57 @@ static int bnxt_flash_firmware(struct net_device *dev,
return rc;
}
+static int bnxt_flash_microcode(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ struct bnxt_ucode_trailer *trailer;
+ u32 calculated_crc;
+ u32 stored_crc;
+ int rc = 0;
+
+ if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
+ sizeof(*trailer)));
+ if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
+ netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
+ le32_to_cpu(trailer->sig));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->dir_type) != dir_type) {
+ netdev_err(dev, "Expected microcode type: %d, read: %d\n",
+ dir_type, le16_to_cpu(trailer->dir_type));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->trailer_length) <
+ sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode trailer length: %d\n",
+ le16_to_cpu(trailer->trailer_length));
+ return -EINVAL;
+ }
+
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev,
+ "CRC32 (%08lX) does not match calculated: %08lX\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+
+ return rc;
+}
+
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
switch (dir_type) {
@@ -1206,7 +1264,7 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
return false;
}
-static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
switch (dir_type) {
case BNX_DIR_TYPE_AVS:
@@ -1227,7 +1285,7 @@ static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
static bool bnxt_dir_type_is_executable(u16 dir_type)
{
return bnxt_dir_type_is_ape_bin_format(dir_type) ||
- bnxt_dir_type_is_unprotected_exec_format(dir_type);
+ bnxt_dir_type_is_other_exec_format(dir_type);
}
static int bnxt_flash_firmware_from_file(struct net_device *dev,
@@ -1237,10 +1295,6 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (dir_type != BNX_DIR_TYPE_UPDATE &&
- bnxt_dir_type_is_executable(dir_type) == false)
- return -EINVAL;
-
rc = request_firmware(&fw, filename, &dev->dev);
if (rc != 0) {
netdev_err(dev, "Error %d requesting firmware file: %s\n",
@@ -1249,6 +1303,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw->data, fw->size);
@@ -1257,10 +1313,83 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
static int bnxt_flash_package_from_file(struct net_device *dev,
- char *filename)
+ char *filename, u32 install_type)
{
- netdev_err(dev, "packages are not yet supported\n");
- return -EINVAL;
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_nvm_install_update_input install = {0};
+ const struct firmware *fw;
+ u32 item_len;
+ u16 index;
+ int rc;
+
+ bnxt_hwrm_fw_set_time(bp);
+
+ if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL) != 0) {
+ netdev_err(dev, "PKG update area not created in nvram\n");
+ return -ENOBUFS;
+ }
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "PKG error %d requesting file: %s\n",
+ rc, filename);
+ return rc;
+ }
+
+ if (fw->size > item_len) {
+ netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+ (unsigned long)fw->size);
+ rc = -EFBIG;
+ } else {
+ dma_addr_t dma_handle;
+ u8 *kmem;
+ struct hwrm_nvm_modify_input modify = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+ modify.dir_idx = cpu_to_le16(index);
+ modify.len = cpu_to_le32(fw->size);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
+ &dma_handle, GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev,
+ "dma_alloc_coherent failure, length = %u\n",
+ (unsigned int)fw->size);
+ rc = -ENOMEM;
+ } else {
+ memcpy(kmem, fw->data, fw->size);
+ modify.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &modify, sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
+ dma_handle);
+ }
+ }
+ release_firmware(fw);
+ if (rc)
+ return rc;
+
+ if ((install_type & 0xffff) == 0)
+ install_type >>= 16;
+ bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+ install.install_type = cpu_to_le32(install_type);
+
+ rc = hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ if (resp->result) {
+ netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
+ (s8)resp->result, (int)resp->problem_item);
+ return -ENOPKG;
+ }
+ return 0;
}
static int bnxt_flash_device(struct net_device *dev,
@@ -1271,8 +1400,10 @@ static int bnxt_flash_device(struct net_device *dev,
return -EINVAL;
}
- if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
- return bnxt_flash_package_from_file(dev, flash->data);
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
+ flash->region > 0xffff)
+ return bnxt_flash_package_from_file(dev, flash->data,
+ flash->region);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -1516,7 +1647,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
/* Create or re-write an NVM item: */
if (bnxt_dir_type_is_executable(type) == true)
- return -EINVAL;
+ return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
attr = eeprom->offset & 0xffff;
@@ -1718,6 +1849,25 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
return rc;
}
+static int bnxt_nway_reset(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ int rc = 0;
+
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, true, false);
+
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -1750,4 +1900,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .nway_reset = bnxt_nway_reset
};
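
The ethtool channel changes above handle adapters whose RX and TX ring maxima differ: a combined count is now accepted up to max(max_rx, max_tx), and each ring type is clamped to its own limit afterwards. A small sketch of that clamping (illustrative helper, not driver code):

	static void clamp_combined_rings(struct bnxt *bp, int combined,
					 int max_rx_rings, int max_tx_rings)
	{
		/* Accept up to the larger maximum, then clamp per type. */
		bp->rx_nr_rings = min_t(int, combined, max_rx_rings);
		bp->tx_nr_rings_per_tc = min_t(int, combined, max_tx_rings);
	}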
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 82bf44ab811b..cad30ddc6936 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -11,6 +11,7 @@
#define __BNXT_FW_HDR_H__
#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+#define BNXT_UCODE_TRAILER_SIGNATURE 0x726c7254 /* "Trlr" */
enum SUPPORTED_FAMILY {
DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
@@ -85,7 +86,7 @@ enum SUPPORTED_MEDIA {
struct bnxt_fw_header {
__le32 signature; /* contains the constant value of
- * BNXT_Firmware_Bin_Signatures
+ * BNXT_FIRMWARE_BIN_SIGNATURE
*/
u8 flags; /* reserved for ChiMP use */
u8 code_type; /* enum SUPPORTED_CODE */
@@ -102,4 +103,17 @@ struct bnxt_fw_header {
u8 major_ver;
};
+/* Microcode and pre-boot software/firmware trailer: */
+struct bnxt_ucode_trailer {
+ u8 rsa_sig[256];
+ __le16 flags;
+ u8 version_format;
+ u8 version_length;
+ u8 version[16];
+ __le16 dir_type;
+ __le16 trailer_length;
+ __le32 sig; /* BNXT_UCODE_TRAILER_SIGNATURE */
+ __le32 chksum; /* CRC-32 */
+};
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 517567f6d651..04a96cc3498a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -39,7 +39,7 @@ struct eject_cmpl {
__le16 type;
#define EJECT_CMPL_TYPE_MASK 0x3fUL
#define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
__le16 len;
__le32 opaque;
__le32 v;
@@ -52,7 +52,7 @@ struct hwrm_cmpl {
__le16 type;
#define HWRM_CMPL_TYPE_MASK 0x3fUL
#define HWRM_CMPL_TYPE_SFT 0
- #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ #define HWRM_CMPL_TYPE_HWRM_DONE 0x20UL
__le16 sequence_id;
__le32 unused_1;
__le32 v;
@@ -65,7 +65,7 @@ struct hwrm_fwd_req_cmpl {
__le16 req_len_type;
#define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
__le16 source_id;
@@ -81,7 +81,7 @@ struct hwrm_fwd_resp_cmpl {
__le16 type;
#define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
- #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
__le16 source_id;
__le16 resp_len;
__le16 unused_1;
@@ -96,25 +96,26 @@ struct hwrm_async_event_cmpl {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
@@ -130,9 +131,9 @@ struct hwrm_async_event_cmpl_link_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
@@ -156,9 +157,9 @@ struct hwrm_async_event_cmpl_link_mtu_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
@@ -176,9 +177,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
@@ -200,8 +201,7 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -211,9 +211,9 @@ struct hwrm_async_event_cmpl_dcb_config_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
@@ -231,9 +231,9 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
@@ -258,9 +258,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
@@ -278,9 +278,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
@@ -300,9 +300,9 @@ struct hwrm_async_event_cmpl_func_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
@@ -320,9 +320,9 @@ struct hwrm_async_event_cmpl_func_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
@@ -340,9 +340,9 @@ struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
@@ -362,9 +362,9 @@ struct hwrm_async_event_cmpl_pf_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
@@ -384,9 +384,9 @@ struct hwrm_async_event_cmpl_vf_flr {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
@@ -404,9 +404,9 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
@@ -424,9 +424,9 @@ struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
@@ -443,9 +443,9 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
@@ -465,15 +465,15 @@ struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
@@ -485,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.3.0 */
+/* HW Resource Manager Specification 1.5.1 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 3
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 5
+#define HWRM_VERSION_UPDATE 1

-#define HWRM_VERSION_STR "1.3.0"
+#define HWRM_VERSION_STR "1.5.1"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
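For fields a request leaves unspecified, the convention above is all F's sized to the field. A minimal sketch of that cast pattern, assuming a hypothetical request pointer req with a 16-bit vf_id field:

        /* not-applicable marker: all F's, cast to the width of the field */
        req->vf_id = cpu_to_le16((u16)~0U);     /* 0xffff for a 16-bit field */
        /* a 32-bit field would take cpu_to_le32(~0U) the same way */
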
@@ -556,8 +556,8 @@ struct cmd_nums {
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
- #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define RESERVED2 (0x33UL)
+ #define RESERVED3 (0x34UL)
#define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
#define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
#define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
@@ -574,6 +574,7 @@ struct cmd_nums {
#define HWRM_VNIC_RSS_QCFG (0x47UL)
#define HWRM_VNIC_PLCMODES_CFG (0x48UL)
#define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_VNIC_QCAPS (0x4aUL)
#define HWRM_RING_ALLOC (0x50UL)
#define HWRM_RING_FREE (0x51UL)
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
@@ -581,13 +582,15 @@ struct cmd_nums {
#define HWRM_RING_RESET (0x5eUL)
#define HWRM_RING_GRP_ALLOC (0x60UL)
#define HWRM_RING_GRP_FREE (0x61UL)
+ #define RESERVED5 (0x64UL)
+ #define RESERVED6 (0x65UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
#define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED3 (0x94UL)
+ #define RESERVED4 (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
@@ -607,6 +610,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
#define HWRM_FW_RESET (0xc0UL)
#define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_FW_SET_TIME (0xc8UL)
+ #define HWRM_FW_GET_TIME (0xc9UL)
#define HWRM_EXEC_FWD_RESP (0xd0UL)
#define HWRM_REJECT_FWD_RESP (0xd1UL)
#define HWRM_FWD_RESP (0xd2UL)
@@ -615,11 +620,13 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_ALLOC (0xf0UL)
#define HWRM_WOL_FILTER_FREE (0xf1UL)
#define HWRM_WOL_FILTER_QCFG (0xf2UL)
+ #define HWRM_WOL_REASON_QCFG (0xf3UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
#define HWRM_NVM_MODIFY (0xfff4UL)
#define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
#define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
@@ -824,7 +831,9 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_min;
u8 netctrl_fw_bld;
u8 netctrl_fw_rsvd;
- __le32 reserved1;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -839,9 +848,9 @@ struct hwrm_ver_get_output {
u8 chip_metal;
u8 chip_bond_id;
u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC (0x0UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0)
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
@@ -863,10 +872,10 @@ struct hwrm_func_reset_input {
#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
__le16 vf_id;
u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
u8 unused_0;
};
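As a sketch of how the reset levels compose into a request, assuming a zeroed struct hwrm_func_reset_input req whose message header fields are filled in elsewhere, and assuming the usual __le32 enables member just above this hunk:

        /* PF resetting one of its VFs: mark vf_id valid and pick RESETVF */
        req.enables = cpu_to_le32(FUNC_RESET_REQ_ENABLES_VF_ID_VALID);
        req.vf_id = cpu_to_le16(vf_id);         /* vf_id is a hypothetical local */
        req.func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;
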
@@ -1028,6 +1037,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1047,9 +1060,8 @@ struct hwrm_func_qcaps_output {
__le32 max_mcast_filters;
__le32 max_flow_id;
__le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
u8 unused_0;
- u8 unused_1;
- u8 unused_2;
u8 valid;
};
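The new capability bits are additive, so a driver can probe them before relying on the features. A minimal sketch, assuming resp points at a completed hwrm_func_qcaps_output and that the flags member earlier in the struct (outside this hunk) is a __le32:

        bool tx_rl_supported = false;           /* hypothetical driver state */

        if (le32_to_cpu(resp->flags) & FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED)
                tx_rl_supported = true;         /* per-TX-ring rate limiting available */
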
@@ -1077,6 +1089,7 @@ struct hwrm_func_qcfg_output {
__le16 flags;
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1089,29 +1102,46 @@ struct hwrm_func_qcfg_output {
__le16 mru;
__le16 stat_ctx_id;
u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
u8 unused_0;
__le16 dflt_vnic_id;
u8 unused_1;
u8 unused_2;
__le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
u8 unused_3;
- __le16 unused_4;
+ __le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
+ __le16 alloc_sp_tx_rings;
+ u8 unused_4;
u8 valid;
};
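min_bw and max_bw are no longer plain numbers: bits 0-27 carry the value, bit 28 is reserved, and bits 29-31 select the unit. A decode sketch using the masks above, with resp a hypothetical completed response; the PERCENT1_100 unit presumably means hundredths of a percent, going by the name:

        u32 raw = le32_to_cpu(resp->max_bw);
        u32 bw = (raw & FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK) >>
                 FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT;
        u32 unit = raw & FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK;
        u32 rate_mbps = 0, rate_pct_x100 = 0;   /* hypothetical locals */

        if (unit == FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS)
                rate_mbps = bw;
        else if (unit == FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100)
                rate_pct_x100 = bw;
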
@@ -1171,18 +1201,36 @@ struct hwrm_func_cfg_input {
__le16 dflt_vlan;
__be32 dflt_ip_addr[4];
__le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
__le16 async_event_cr;
u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
u8 allowed_vlan_pris;
u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
u8 unused_2;
__le16 num_mcast_filters;
};
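The request side mirrors that encoding. A sketch of composing max_bw for a hwrm_func_cfg request, where req and rate_mbps are hypothetical and rate_mbps must fit in the 28-bit value field:

        req->max_bw = cpu_to_le32((rate_mbps << FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT) |
                                  FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS);
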
@@ -1341,16 +1389,16 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
__le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1415,13 +1463,13 @@ struct hwrm_func_buf_rgtr_input {
__le16 vf_id;
__le16 req_buf_num_pages;
__le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
__le16 req_buf_len;
__le16 resp_buf_len;
u8 unused_0;
@@ -1473,16 +1521,16 @@ struct hwrm_func_drv_qver_output {
__le16 seq_id;
__le16 resp_len;
__le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1528,44 +1576,44 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1582,12 +1630,12 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1641,25 +1689,25 @@ struct hwrm_port_phy_qcfg_output {
__le16 seq_id;
__le16 resp_len;
u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
u8 unused_0;
__le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
u8 duplex;
- #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL
u8 pause;
#define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
@@ -1679,39 +1727,39 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1728,46 +1776,46 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
@@ -1796,11 +1844,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
@@ -1859,7 +1907,7 @@ struct hwrm_port_mac_cfg_input {
__le64 resp_addr;
__le32 flags;
#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
@@ -1868,28 +1916,50 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
__le16 port_id;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
- u8 ivlan_pri2cos_map_pri;
- u8 lcos_map_pri;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
__le16 rx_ts_capture_ptp_msg_type;
__le16 tx_ts_capture_ptp_msg_type;
- __le32 unused_0;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
};
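cos_field_cfg packs three selectors into one byte: the VLAN PRI select at bits 1-2, the tunnel VLAN PRI select at bits 3-4, and a default CoS at bits 5-7. A composition sketch, with req and dflt_cos hypothetical (dflt_cos must fit in 3 bits); the matching enables bit is set alongside:

        req->enables |= cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG);
        req->cos_field_cfg =
                PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST |
                PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST |
                (dflt_cos << PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT);
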
/* Output (16 bytes) */
@@ -1902,9 +1972,9 @@ struct hwrm_port_mac_cfg_output {
__le16 mtu;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
u8 unused_0;
u8 valid;
};
@@ -2163,8 +2233,8 @@ struct hwrm_queue_qportcfg_input {
__le64 resp_addr;
__le32 flags;
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
@@ -2179,50 +2249,51 @@ struct hwrm_queue_qportcfg_output {
u8 max_configurable_queues;
u8 max_configurable_lossless_queues;
u8 queue_cfg_allowed;
- u8 queue_buffers_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
u8 queue_pfcenable_cfg_allowed;
u8 queue_pri2cos_cfg_allowed;
u8 queue_cos2bw_cfg_allowed;
u8 queue_id0;
u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id1;
u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id2;
u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id3;
u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id4;
u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id5;
u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id6;
u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id7;
u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 valid;
};
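The eight queue_idN / service-profile pairs sit back to back as u8 fields, so they can be walked as a byte stream instead of being named one by one; the SERVICE_PROFILE values are identical across IDs, so the ID0 macros serve for all of them. A sketch, assuming resp points at a completed hwrm_queue_qportcfg_output:

        u8 *qptr = &resp->queue_id0;
        u8 qids[8];
        int i, lossless = 0;

        for (i = 0; i < resp->max_configurable_queues && i < 8; i++) {
                qids[i] = *qptr++;              /* queue_id{i} */
                if (*qptr++ ==                  /* queue_id{i}_service_profile */
                    QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
                        lossless++;
        }
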
@@ -2235,19 +2306,21 @@ struct hwrm_queue_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
__le32 queue_id;
__le32 dflt_len;
u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 unused_0[7];
};
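PATH grew from a single bit to a two-bit field so a queue can now be configured for both directions in one call. A sketch of selecting the new bidirectional path, with req hypothetical:

        u32 flags = le32_to_cpu(req->flags) & ~QUEUE_CFG_REQ_FLAGS_PATH_MASK;

        req->flags = cpu_to_le32(flags | QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
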
@@ -2264,50 +2337,6 @@ struct hwrm_queue_cfg_output {
u8 valid;
};
-/* hwrm_queue_buffers_cfg */
-/* Input (56 bytes) */
-struct hwrm_queue_buffers_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
- __le32 enables;
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL
- __le32 queue_id;
- __le32 reserved;
- __le32 shared;
- __le32 xoff;
- __le32 xon;
- __le32 full;
- __le32 notfull;
- __le32 max;
-};
-
-/* Output (16 bytes) */
-struct hwrm_queue_buffers_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_queue_pfcenable_cfg */
/* Input (24 bytes) */
struct hwrm_queue_pfcenable_cfg_input {
@@ -2351,12 +2380,22 @@ struct hwrm_queue_pri2cos_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
__le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
u8 port_id;
u8 pri0_cos_queue_id;
u8 pri1_cos_queue_id;
@@ -2404,82 +2443,226 @@ struct hwrm_queue_cos2bw_cfg_input {
u8 queue_id0;
u8 unused_0;
__le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
__le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
__le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
__le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
__le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
__le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
__le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
__le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
@@ -2563,6 +2746,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2615,18 +2799,18 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
__le16 vnic_id;
__le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
__le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
u8 unused_0;
u8 unused_1;
__le32 max_agg_timer;
@@ -2780,15 +2964,15 @@ struct hwrm_ring_alloc_input {
__le64 resp_addr;
__le32 enables;
#define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -2804,18 +2988,36 @@ struct hwrm_ring_alloc_input {
u8 unused_4;
u8 unused_5;
__le32 reserved1;
- __le16 reserved2;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
u8 unused_6;
u8 unused_7;
__le32 reserved3;
__le32 stat_ctx_id;
__le32 reserved4;
__le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
u8 unused_8[3];
};
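
The min_bw/max_bw words in these requests all share one layout: a 28-bit bandwidth value in bits 27:0 and a 3-bit unit selector at bit 29, per the _MASK/_SFT/_UNIT_* defines above. A minimal sketch of encoding and decoding under that layout, using the RING_ALLOC_REQ_MAX_BW_* names from this hunk (the helper functions themselves are hypothetical; the struct field is __le32, so a caller would still wrap the encoded word in cpu_to_le32()):

/* Hypothetical helpers; the masks and shifts are the defines above. */
static inline u32 example_max_bw_encode_mbps(u32 mbps)
{
	return (mbps & RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK) |
	       RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS;
}

static inline u32 example_max_bw_decode_value(u32 max_bw)
{
	return (max_bw & RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK) >>
	       RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT;
}
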
@@ -2842,9 +3044,9 @@ struct hwrm_ring_free_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -2942,9 +3144,9 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3068,36 +3270,36 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 t_l2_ivlan;
__le16 t_l2_ivlan_mask;
u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
u8 unused_6;
__le32 src_id;
u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_7;
__le16 dst_id;
__le16 mirror_vnic_id;
u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
u8 unused_8;
__le32 unused_9;
__le64 l2_filter_id_hint;
@@ -3246,16 +3448,16 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_0;
__le32 vni;
__le32 dst_vnic_id;
@@ -3311,14 +3513,14 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
u8 unused_0;
__le16 unused_1;
__le32 encap_data[16];
@@ -3397,32 +3599,32 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
u8 src_macaddr[6];
__be16 ethertype;
u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
__be32 src_ipaddr[4];
__be32 src_ipaddr_mask[4];
__be32 dst_ipaddr[4];
@@ -3511,8 +3713,8 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0[7];
};
@@ -3539,8 +3741,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__be16 tunnel_dst_port_val;
__le32 unused_1;
@@ -3570,8 +3772,8 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -3720,15 +3922,15 @@ struct hwrm_fw_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
__le16 unused_0[3];
};
@@ -3739,9 +3941,9 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3759,11 +3961,11 @@ struct hwrm_fw_qstatus_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 unused_0[7];
};
@@ -3774,9 +3976,9 @@ struct hwrm_fw_qstatus_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3785,6 +3987,42 @@ struct hwrm_fw_qstatus_output {
u8 valid;
};
+/* hwrm_fw_set_time */
+/* Input (32 bytes) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
@@ -3921,32 +4159,6 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_raw_write_blk */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_write_blk_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le32 dest_addr;
- __le32 len;
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_raw_write_blk_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_nvm_read */
/* Input (40 bytes) */
struct hwrm_nvm_read_input {
@@ -4132,9 +4344,9 @@ struct hwrm_nvm_find_dir_entry_input {
u8 opt_ordinal;
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
u8 unused_1[3];
};
@@ -4266,4 +4478,41 @@ struct hwrm_nvm_verify_update_output {
u8 valid;
};
+/* hwrm_nvm_install_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
#endif
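
hwrm_fw_set_time is one of the two commands added in this header revision (the other being hwrm_nvm_install_update). A minimal sketch of filling the request, assuming the driver's existing bnxt_hwrm_cmd_hdr_init()/hwrm_send_message() helpers and a HWRM_FW_SET_TIME req_type defined elsewhere in this header:

/* Sketch only: HWRM_FW_SET_TIME is assumed to be the req_type that
 * pairs with struct hwrm_fw_set_time_input.
 */
static int example_hwrm_set_time(struct bnxt *bp, const struct tm *tm)
{
	struct hwrm_fw_set_time_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm->tm_year);	/* struct tm epoch */
	req.month = 1 + tm->tm_mon;			/* tm_mon is 0..11 */
	req.day = tm->tm_mday;
	req.hour = tm->tm_hour;
	req.minute = tm->tm_min;
	req.second = tm->tm_sec;
	req.zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
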
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 50d2007a2640..ec6cd18842c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -19,6 +19,45 @@
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+ struct bnxt_vf_info *vf, u16 event_id)
+{
+ struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_async_event_cmpl *async_cmpl;
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ if (vf)
+ req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ else
+ /* broadcast this async event to all VFs */
+ req.encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ async_cmpl->type =
+ cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->event_id = cpu_to_le16(event_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+ rc);
+ goto fwd_async_event_cmpl_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_async_event_cmpl_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -135,7 +174,8 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -146,6 +186,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -243,8 +286,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
rc = -EINVAL;
break;
}
- /* CHIMP TODO: send msg to VF to update new link state */
-
+ if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
}
@@ -525,46 +569,6 @@ err_out1:
return rc;
}
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
- struct bnxt_vf_info *vf,
- u16 event_id)
-{
- int rc = 0;
- struct hwrm_fwd_async_event_cmpl_input req = {0};
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_async_event_cmpl *async_cmpl;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
- if (vf)
- req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
- else
- /* broadcast this async event to all VFs */
- req.encap_async_event_target_id = cpu_to_le16(0xffff);
- async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
- async_cmpl->type =
- cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
- async_cmpl->event_id = cpu_to_le16(event_id);
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
- rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
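
Two changes land in bnxt_sriov.c: bnxt_hwrm_fwd_async_event_cmpl() moves above bnxt_set_vf_link_state(), whose new call replaces the old CHIMP TODO, so the move avoids a forward declaration; and bnxt_set_vf_vlan() gains a vlan_proto argument from the ndo_set_vf_vlan signature change, rejecting any TPID other than 802.1Q. The guard pattern in isolation, with hypothetical driver names:

#include <linux/etherdevice.h>

/* Sketch of the new ndo_set_vf_vlan contract: a driver that can only
 * filter on the 0x8100 TPID must refuse 802.1ad (0x88a8) requests.
 */
static int example_set_vf_vlan(struct net_device *dev, int vf_id,
			       u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* ... validate vf_id/vlan_id and program the NIC ... */
	return 0;
}
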
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 0392670ab49c..1ab72e4820af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -12,7 +12,7 @@
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
-int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 541456398dfb..4464bc5db934 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -450,8 +450,8 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static int bcmgenet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -461,11 +461,11 @@ static int bcmgenet_get_settings(struct net_device *dev,
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_gset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_get(priv->phydev, cmd);
}
-static int bcmgenet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -475,7 +475,7 @@ static int bcmgenet_set_settings(struct net_device *dev,
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_sset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -979,12 +979,10 @@ static int bcmgenet_nway_reset(struct net_device *dev)
}
/* standard ethtool support functions. */
-static struct ethtool_ops bcmgenet_ethtool_ops = {
+static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
- .get_settings = bcmgenet_get_settings,
- .set_settings = bcmgenet_set_settings,
.get_drvinfo = bcmgenet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = bcmgenet_get_msglevel,
@@ -996,6 +994,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.nway_reset = bcmgenet_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
+ .get_link_ksettings = bcmgenet_get_link_ksettings,
+ .set_link_ksettings = bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
@@ -2669,128 +2669,6 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
- u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- return !!(reg & (1 << (f_index % 32)));
-}
-
-static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
-}
-
-static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
- u32 f_index, u32 rx_queue)
-{
- u32 offset;
- u32 reg;
-
- offset = f_index / 8;
- reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
- reg &= ~(0xF << (4 * (f_index % 8)));
- reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
- bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
-}
-
-static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
- u32 f_index, u32 f_length)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg &= ~(0xFF << (8 * (f_index % 4)));
- reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
-}
-
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
- u32 f_index;
-
- for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
- if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
- return f_index;
-
- return -ENOMEM;
-}
-
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit 19 - nibble 0 match enable
- * bit 18 - nibble 1 match enable
- * bit 17 - nibble 2 match enable
- * bit 16 - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8 - nibble 1 data
- * bits 7:4 - nibble 2 data
- * bits 3:0 - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue)
-{
- int f_index;
- u32 i;
-
- f_index = bcmgenet_hfb_find_unused_filter(priv);
- if (f_index < 0)
- return -ENOMEM;
-
- if (f_length > priv->hw_params->hfb_filter_size)
- return -EINVAL;
-
- for (i = 0; i < f_length; i++)
- bcmgenet_hfb_writel(priv, f_data[i],
- (f_index * priv->hw_params->hfb_filter_size + i) *
- sizeof(u32));
-
- bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
- bcmgenet_hfb_enable_filter(priv, f_index);
- bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
-
- return 0;
-}
-
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
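
bcmgenet swaps the legacy get_settings/set_settings ethtool hooks for the get_link_ksettings/set_link_ksettings pair; with phylib attached, each handler reduces to a single call. (The removed HFB filter helpers are unrelated dead code with no in-tree callers.) The phylib conversion pattern, with hypothetical driver names:

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;
	/* phylib fills base.speed/duplex and the link-mode bitmaps */
	return phy_ethtool_ksettings_get(priv->phydev, cmd);
}

static int example_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;
	return phy_ethtool_ksettings_set(priv->phydev, cmd);
}
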
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ea967df4b202..a927a730da10 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12079,95 +12079,107 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return ret;
}
-static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tg3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct tg3 *tp = netdev_priv(dev);
+ u32 supported, advertising;
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
+ supported = (SUPPORTED_Autoneg);
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- cmd->supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP);
- cmd->port = PORT_TP;
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP);
+ cmd->base.port = PORT_TP;
} else {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
- cmd->advertising = tp->link_config.advertising;
+ advertising = tp->link_config.advertising;
if (tg3_flag(tp, PAUSE_AUTONEG)) {
if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
- cmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
} else {
- cmd->advertising |= ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
}
} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
- cmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
}
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (netif_running(dev) && tp->link_up) {
- ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
- cmd->duplex = tp->link_config.active_duplex;
- cmd->lp_advertising = tp->link_config.rmt_adv;
+ cmd->base.speed = tp->link_config.active_speed;
+ cmd->base.duplex = tp->link_config.active_duplex;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.lp_advertising,
+ tp->link_config.rmt_adv);
+
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
- }
- cmd->phy_address = tp->phy_addr;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = tp->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ }
+ cmd->base.phy_address = tp->phy_addr;
+ cmd->base.autoneg = tp->link_config.autoneg;
return 0;
}
-static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tg3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct tg3 *tp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
- cmd->duplex != DUPLEX_FULL &&
- cmd->duplex != DUPLEX_HALF)
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_HALF)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 mask = ADVERTISED_Autoneg |
ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
@@ -12185,7 +12197,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else
mask |= ADVERTISED_FIBRE;
- if (cmd->advertising & ~mask)
+ if (advertising & ~mask)
return -EINVAL;
mask &= (ADVERTISED_1000baseT_Half |
@@ -12195,13 +12207,13 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full);
- cmd->advertising &= mask;
+ advertising &= mask;
} else {
if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
if (speed != SPEED_1000)
return -EINVAL;
- if (cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else {
if (speed != SPEED_100 &&
@@ -12212,16 +12224,16 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tg3_full_lock(tp, 0);
- tp->link_config.autoneg = cmd->autoneg;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- tp->link_config.advertising = (cmd->advertising |
+ tp->link_config.autoneg = cmd->base.autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ tp->link_config.advertising = (advertising |
ADVERTISED_Autoneg);
tp->link_config.speed = SPEED_UNKNOWN;
tp->link_config.duplex = DUPLEX_UNKNOWN;
} else {
tp->link_config.advertising = 0;
tp->link_config.speed = speed;
- tp->link_config.duplex = cmd->duplex;
+ tp->link_config.duplex = cmd->base.duplex;
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -14094,8 +14106,6 @@ static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static const struct ethtool_ops tg3_ethtool_ops = {
- .get_settings = tg3_get_settings,
- .set_settings = tg3_set_settings,
.get_drvinfo = tg3_get_drvinfo,
.get_regs_len = tg3_get_regs_len,
.get_regs = tg3_get_regs,
@@ -14128,6 +14138,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_ts_info = tg3_get_ts_info,
.get_eee = tg3_get_eee,
.set_eee = tg3_set_eee,
+ .get_link_ksettings = tg3_get_link_ksettings,
+ .set_link_ksettings = tg3_set_link_ksettings,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
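
tg3 mostly drives the PHY itself, so instead of the phylib helpers it converts its legacy u32 supported/advertising masks by hand with the two ethtool_convert_* helpers used above. Their two directions, in a minimal sketch with hypothetical names:

#include <linux/ethtool.h>

/* get path: legacy u32 bitmask -> link-mode bitmap */
static void example_fill_modes(struct ethtool_link_ksettings *cmd,
			       u32 supported, u32 advertising)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
}

/* set path: link-mode bitmap -> legacy u32; returns false when the
 * bitmap carries modes that do not fit in 32 bits
 */
static bool example_read_advertising(const struct ethtool_link_ksettings *cmd,
				     u32 *advertising)
{
	return ethtool_convert_link_mode_to_legacy_u32(advertising,
						cmd->link_modes.advertising);
}
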
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 771cc267f217..f9df4b5ae90e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -54,9 +54,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
* Global variables
*/
static u32 bnad_rxqs_per_cq = 2;
-static u32 bna_id;
-static struct mutex bnad_list_mutex;
-static LIST_HEAD(bnad_list);
+static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -76,23 +74,6 @@ do { \
(_res_info)->res_u.mem_info.len = (_size); \
} while (0)
-static void
-bnad_add_to_list(struct bnad *bnad)
-{
- mutex_lock(&bnad_list_mutex);
- list_add_tail(&bnad->list_entry, &bnad_list);
- bnad->id = bna_id++;
- mutex_unlock(&bnad_list_mutex);
-}
-
-static void
-bnad_remove_from_list(struct bnad *bnad)
-{
- mutex_lock(&bnad_list_mutex);
- list_del(&bnad->list_entry);
- mutex_unlock(&bnad_list_mutex);
-}
-
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -3573,14 +3554,12 @@ bnad_lock_init(struct bnad *bnad)
{
spin_lock_init(&bnad->bna_lock);
mutex_init(&bnad->conf_mutex);
- mutex_init(&bnad_list_mutex);
}
static void
bnad_lock_uninit(struct bnad *bnad)
{
mutex_destroy(&bnad->conf_mutex);
- mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
@@ -3653,7 +3632,7 @@ bnad_pci_probe(struct pci_dev *pdev,
}
bnad = netdev_priv(netdev);
bnad_lock_init(bnad);
- bnad_add_to_list(bnad);
+ bnad->id = atomic_inc_return(&bna_id) - 1;
mutex_lock(&bnad->conf_mutex);
/*
@@ -3807,7 +3786,6 @@ pci_uninit:
bnad_pci_uninit(pdev);
unlock_mutex:
mutex_unlock(&bnad->conf_mutex);
- bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
free_netdev(netdev);
return err;
@@ -3845,7 +3823,6 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
mutex_unlock(&bnad->conf_mutex);
- bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
/* Remove the debugfs node for this bnad */
kfree(bnad->regdata);
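
The global bnad list existed only to hand out sequential device ids; an atomic counter does the same without the mutex or the list_entry field (removed from struct bnad below). The idiom in isolation:

#include <linux/atomic.h>

static atomic_t example_id = ATOMIC_INIT(0);	/* file-scope counter */

/* atomic_inc_return() yields 1, 2, 3, ...; subtracting one keeps the
 * ids zero-based, matching the old "bnad->id = bna_id++" behaviour
 * with no lock.
 */
static int example_alloc_id(void)
{
	return atomic_inc_return(&example_id) - 1;
}
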
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f4ed816b93ee..46f7b842b39c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -288,7 +288,6 @@ struct bnad_rx_unmap_q {
struct bnad {
struct net_device *netdev;
u32 id;
- struct list_head list_entry;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TX];
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d954a97b0b0b..63144bb413d1 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -541,6 +541,14 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
}
+static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+{
+ desc->addr = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ desc->addrh = (u32)(addr >> 32);
+#endif
+}
+
static void macb_tx_error_task(struct work_struct *work)
{
struct macb_queue *queue = container_of(work, struct macb_queue,
@@ -621,14 +629,17 @@ static void macb_tx_error_task(struct work_struct *work)
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
- desc->addr = 0;
+ macb_set_addr(desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
- queue_writel(queue, TBQP, queue->tx_ring_dma);
+ queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -750,7 +761,7 @@ static void gem_rx_refill(struct macb *bp)
if (entry == RX_RING_SIZE - 1)
paddr |= MACB_BIT(RX_WRAP);
- bp->rx_ring[entry].addr = paddr;
+ macb_set_addr(&(bp->rx_ring[entry]), paddr);
bp->rx_ring[entry].ctrl = 0;
/* properly align Ethernet header */
@@ -798,7 +809,9 @@ static int gem_rx(struct macb *bp, int budget)
int count = 0;
while (count < budget) {
- u32 addr, ctrl;
+ u32 ctrl;
+ dma_addr_t addr;
+ bool rxused;
entry = macb_rx_ring_wrap(bp->rx_tail);
desc = &bp->rx_ring[entry];
@@ -806,10 +819,14 @@ static int gem_rx(struct macb *bp, int budget)
/* Make hw descriptor updates visible to CPU */
rmb();
- addr = desc->addr;
+ rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
+ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ addr |= ((u64)(desc->addrh) << 32);
+#endif
ctrl = desc->ctrl;
- if (!(addr & MACB_BIT(RX_USED)))
+ if (!rxused)
break;
bp->rx_tail++;
@@ -835,7 +852,6 @@ static int gem_rx(struct macb *bp, int budget)
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
skb_put(skb, len);
- addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
dma_unmap_single(&bp->pdev->dev, addr,
bp->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1299,7 +1315,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BIT(TX_WRAP);
/* Set TX buffer descriptor */
- desc->addr = tx_skb->mapping;
+ macb_set_addr(desc, tx_skb->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -1382,7 +1398,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (macb_clear_csum(skb)) {
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto unlock;
}
/* Map socket buffer for DMA transfer */
@@ -1445,6 +1461,9 @@ static void gem_free_rx_buffers(struct macb *bp)
desc = &bp->rx_ring[i];
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ addr |= ((u64)(desc->addrh) << 32);
+#endif
dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1570,7 +1589,7 @@ static void gem_init_rings(struct macb *bp)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
for (i = 0; i < TX_RING_SIZE; i++) {
- queue->tx_ring[i].addr = 0;
+ macb_set_addr(&(queue->tx_ring[i]), 0);
queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
}
queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
@@ -1717,6 +1736,10 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BIT(TXCOEN);
else
dmacfg &= ~GEM_BIT(TXCOEN);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dmacfg |= GEM_BIT(ADDR64);
+#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -1762,9 +1785,15 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, TBQP, queue->tx_ring_dma);
+ queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+#endif
/* Enable interrupts */
queue_writel(queue, IER,
@@ -2326,7 +2355,8 @@ static void macb_probe_queues(void __iomem *mem,
}
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
- struct clk **hclk, struct clk **tx_clk)
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk)
{
int err;
@@ -2348,6 +2378,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (IS_ERR(*tx_clk))
*tx_clk = NULL;
+ *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
err = clk_prepare_enable(*pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
@@ -2366,8 +2400,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
goto err_disable_hclk;
}
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
return 0;
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+
err_disable_hclk:
clk_disable_unprepare(*hclk);
@@ -2402,6 +2445,9 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = GEM_IDR(hw_q - 1);
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+			queue->TBQPH = GEM_TBQPH(hw_q - 1);
+#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -2409,6 +2455,9 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = MACB_IDR;
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue->TBQPH = MACB_TBQPH;
+#endif
}
/* get irq: here we use the linux queue index, not the hardware
@@ -2751,12 +2800,14 @@ static const struct net_device_ops at91ether_netdev_ops = {
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
- struct clk **hclk, struct clk **tx_clk)
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk)
{
int err;
*hclk = NULL;
*tx_clk = NULL;
+ *rx_clk = NULL;
*pclk = devm_clk_get(&pdev->dev, "ether_clk");
if (IS_ERR(*pclk))
@@ -2880,13 +2931,13 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static int macb_probe(struct platform_device *pdev)
{
int (*clk_init)(struct platform_device *, struct clk **,
- struct clk **, struct clk **)
+ struct clk **, struct clk **, struct clk **)
= macb_clk_init;
int (*init)(struct platform_device *) = macb_init;
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
const struct macb_config *macb_config = NULL;
- struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
+ struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
bool native_io;
@@ -2914,7 +2965,7 @@ static int macb_probe(struct platform_device *pdev)
}
}
- err = clk_init(pdev, &pclk, &hclk, &tx_clk);
+ err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
if (err)
return err;
@@ -2950,6 +3001,7 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
+ bp->rx_clk = rx_clk;
if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
@@ -2958,6 +3010,11 @@ static int macb_probe(struct platform_device *pdev)
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+#endif
+
spin_lock_init(&bp->lock);
/* setup capabilities */
@@ -2968,7 +3025,7 @@ static int macb_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
err = dev->irq;
- goto err_disable_clocks;
+ goto err_out_free_netdev;
}
mac = of_get_mac_address(np);
@@ -3043,6 +3100,7 @@ err_disable_clocks:
clk_disable_unprepare(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
+ clk_disable_unprepare(rx_clk);
return err;
}
@@ -3069,6 +3127,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
+ clk_disable_unprepare(bp->rx_clk);
free_netdev(dev);
}
@@ -3092,6 +3151,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
+ clk_disable_unprepare(bp->rx_clk);
}
return 0;
@@ -3111,6 +3171,7 @@ static int __maybe_unused macb_resume(struct device *dev)
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
+ clk_prepare_enable(bp->rx_clk);
}
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index b6fcf10621b6..8bed4b52fef5 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -66,6 +66,8 @@
#define MACB_USRIO 0x00c0
#define MACB_WOL 0x00c4
#define MACB_MID 0x00fc
+#define MACB_TBQPH 0x04C8
+#define MACB_RBQPH 0x04D4
/* GEM register offsets. */
#define GEM_NCFGR 0x0004 /* Network Config */
@@ -139,6 +141,7 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
+#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
@@ -249,6 +252,8 @@
#define GEM_RXBS_SIZE 8
#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
#define GEM_DDRP_SIZE 1
+#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
+#define GEM_ADDR64_SIZE 1
/* Bitfields in NSR */
@@ -474,6 +479,10 @@
struct macb_dma_desc {
u32 addr;
u32 ctrl;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u32 addrh;
+ u32 resvd;
+#endif
};
/* DMA descriptor bitfields */
@@ -763,7 +772,8 @@ struct macb_config {
u32 caps;
unsigned int dma_burst_length;
int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
- struct clk **hclk, struct clk **tx_clk);
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk);
int (*init)(struct platform_device *pdev);
int jumbo_max_len;
};
@@ -777,6 +787,7 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
+ unsigned int TBQPH;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
@@ -809,6 +820,7 @@ struct macb {
struct clk *pclk;
struct clk *hclk;
struct clk *tx_clk;
+ struct clk *rx_clk;
struct net_device *dev;
struct napi_struct napi;
struct net_device_stats stats;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 0ef232d3331e..92f411c9f0df 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -36,10 +36,20 @@ config THUNDER_NIC_BGX
depends on 64BIT
select PHYLIB
select MDIO_THUNDER
+ select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
+config THUNDER_NIC_RGX
+ tristate "Thunder MAC interface driver (RGX)"
+ depends on 64BIT
+ select PHYLIB
+ select MDIO_THUNDER
+ ---help---
+ This driver supports configuring the XCV block of the RGX
+ interface present on the CN81XX chip.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
@@ -48,7 +58,7 @@ config LIQUIDIO
select LIBCRC32C
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapters
- based on CN66XX and CN68XX chips.
+ based on CN66XX, CN68XX and CN23XX chips.
To compile this driver as a module, choose M here: the module
will be called liquidio. This is recommended.
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
index 2f366806835d..5a27b2a44039 100644
--- a/drivers/net/ethernet/cavium/liquidio/Makefile
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -3,14 +3,16 @@
#
obj-$(CONFIG_LIQUIDIO) += liquidio.o
-liquidio-objs := lio_main.o \
- lio_ethtool.o \
- request_manager.o \
- response_manager.o \
- octeon_device.o \
- cn66xx_device.o \
- cn68xx_device.o \
- octeon_mem_ops.o \
- octeon_droq.o \
- octeon_console.o \
- octeon_nic.o
+liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
+ lio_core.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ cn23xx_pf_device.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_nic.o
+
+liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
new file mode 100644
index 000000000000..bddb198c0b74
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -0,0 +1,1237 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_pf_device.h"
+#include "octeon_main.h"
+
+#define RESET_NOTDONE 0
+#define RESET_DONE 1
+
+/* Change the value of SLI Packet Input Jabber Register to allow
+ * VXLAN TSO packets which can be 64424 bytes, exceeding the
+ * MAX_GSO_SIZE we supplied to the kernel
+ */
+#define CN23XX_INPUT_JABBER 64600
+
+#define LIOLUT_RING_DISTRIBUTION 9
+const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = {
+ 0, 8, 4, 2, 2, 2, 1, 1, 1
+};
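+/* Illustrative use of the table above: index by the configured VF
+ * count to get the ring budget per VF, e.g.
+ * liolut_num_vfs_to_rings_per_vf[2] == 4 gives two VFs four rings
+ * each; index 0 (no VFs) reserves no rings.
+ */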
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
+{
+ int i = 0;
+ u32 regval = 0;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ /*In cn23xx_soft_reset*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
+ "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
+
+ /*In cn23xx_set_dpi_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
+ lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_ENB", i,
+ CN23XX_DPI_DMA_ENG_ENB(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_BUF", i,
+ CN23XX_DPI_DMA_ENG_BUF(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
+ CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
+
+ /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_CONFIG_PCIE_DEVCTL",
+ CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
+
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
+ CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
+
+ /*In cn23xx_specific_regs_setup */
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
+ CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
+ (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+
+ /*In cn23xx_setup_global_mac_regs*/
+ for (i = 0; i < CN23XX_MAX_MACS; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_MAC_RINFO64", i,
+ CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64
+ (i, oct->pf_num))));
+ }
+
+ /*In cn23xx_setup_global_input_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_PKT_CONTROL64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
+ }
+
+ /*In cn23xx_setup_global_output_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_CONTROL", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
+ }
+
+ /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_enb_reg64",
+ CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_sum_reg64",
+ CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
+
+ /*In cn23xx_setup_iq_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_IQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_DOORBELL", i,
+ CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_DOORBELL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_INSTR_COUNT64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
+ }
+
+ /*In cn23xx_setup_oq_regs*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_OQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_SENT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_CREDIT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_TIME_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_CNT_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
+}
+
+static int cn23xx_pf_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
+ oct->octeon_id);
+
+ octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
+
+ /* Initiate chip-wide soft reset */
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
+ oct->octeon_id);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
+ oct->octeon_id);
+
+ /* restore the reset value*/
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+static void cn23xx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 regval;
+ u32 uncorrectable_err_mask, correctable_err_status;
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
+ uncorrectable_err_mask = 0;
+ correctable_err_status = 0;
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
+ &uncorrectable_err_mask);
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
+ &correctable_err_status);
+ dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
+ "\tdev_ctl_status_reg = 0x%08x\n"
+ "\tuncorrectable_error_mask_reg = 0x%08x\n"
+ "\tcorrectable_error_status_reg = 0x%08x\n",
+ regval, uncorrectable_err_mask,
+ correctable_err_status);
+ }
+
+ regval |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
+ oct->octeon_id);
+ pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
+
+static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of RST_BOOT[PNR_MUL] hold the reference-clock
+ * multiplier for SLI.
+ */
+
+ /* TBD: get the info in Hand-shake */
+ return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
+}
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
+
+ oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next two steps give the OQ
+ * ticks corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
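+ /* Worked example with a hypothetical 800 MHz SLI clock: 800
+ * clocks/us * 1000 = 800000 clocks/ms, / 1024 = 781 OQ ticks/ms;
+ * a 100 us interrupt time then yields 781 * 100 / 1000 = 78 ticks.
+ */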
+ return oqticks_per_us;
+}
+
+static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u16 mac_no = oct->pcie_port;
+ u16 pf_num = oct->pf_num;
+
+ /* programming SRN and TRS for each MAC(0..3) */
+
+ dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
+ __func__, mac_no);
+ /* By default, map all 64 IOQs to a single MAC */
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
+
+ if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ } else {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
+ }
+
+ /* setting TRS <23:16> */
+ reg_val = reg_val |
+ (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
+ /* write these settings to MAC register */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
+ reg_val);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
+ mac_no, pf_num, (u64)octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
+}
+
+static int cn23xx_reset_io_queues(struct octeon_device *oct)
+{
+ int ret_val = 0;
+ u64 d64;
+ u32 q_no, srn, ern;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ /* As per the HRM register description, s/w can't write 0 to ENB;
+ * to turn a queue off, the RST bit must be set instead.
+ */
+
+ /* Reset the Enable bit for all the 64 IQs. */
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
+ }
+
+ /* Wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = srn; q_no < ern; q_no++) {
+ u64 reg_val = octeon_read_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+ ~CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(reg_val));
+
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
+{
+ u32 q_no, ern, srn;
+ u64 pf_num;
+ u64 intr_threshold, reg_val;
+ struct octeon_instr_queue *iq;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ pf_num = oct->pf_num;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (cn23xx_reset_io_queues(oct))
+ return -1;
+
+ /** Set the MAC_NUM and PVF_NUM in the IQ_PKT_CONTROL reg
+ * for all queues. Only the PF can set these bits.
+ * Bits <30:29> indicate the MAC num.
+ * Bits <47:32> indicate the PVF num.
+ */
+ for (q_no = 0; q_no < ern; q_no++) {
+ reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+ reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* Select ES, RO, NS, RDSIZE and DPTR Format#0 for
+ * the PF queues
+ */
+ for (q_no = srn; q_no < ern; q_no++) {
+ void __iomem *inst_cnt_reg;
+
+ iq = oct->instr_queue[q_no];
+ if (iq)
+ inst_cnt_reg = iq->inst_cnt_reg;
+ else
+ inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no);
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ reg_val |= CN23XX_PKT_INPUT_CTL_MASK;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ /* Set WMARK level for triggering PI_INT */
+ /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
+ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+
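+ /* The write below is a read-modify-write: clear the old WMARK
+ * field (bits <47:32> of the instruction-count register) and OR
+ * in the new threshold at the same bit position.
+ */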
+ writeq((readq(inst_cnt_reg) &
+ ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+ CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+ (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+ inst_cnt_reg);
+ }
+ return 0;
+}
+
+static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 reg_val;
+ u32 q_no, ern, srn;
+ u64 time_threshold;
+
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
+ } else {
+ /** Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
+ }
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue ScatterList
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+
+ /* These interrupts are enabled in the oct->fn_list.enable_interrupt()
+ * routine, which is called after IOQ init.
+ * Set up the interrupt packet and time thresholds
+ * for all the OQs.
+ */
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+ (time_threshold << 32)));
+ }
+
+ /** Set the watermark level for PKO backpressure **/
+ writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
+
+ /* Disable putting OQs into reset when a ring has no doorbells;
+ * enabling this would cause head-of-line blocking.
+ * Do it only for pass 1.1 and pass 1.2.
+ */
+ if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
+ (oct->rev_id == OCTEON_CN23XX_REV_1_1))
+ writeq(readq((u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_GBL_CONTROL) | 0x2,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
+
+ /** Enable channel-level backpressure */
+ if (oct->pf_num)
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
+ else
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
+}
+
+static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
+{
+ cn23xx_enable_error_reporting(oct);
+
+ /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ cn23xx_setup_global_mac_regs(oct);
+
+ if (cn23xx_pf_setup_global_input_regs(oct))
+ return -1;
+
+ cn23xx_pf_setup_global_output_regs(oct);
+
+ /* The default error timeout value should be 0x200000 to avoid a host
+ * hang when reading an invalid register.
+ */
+ octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
+ CN23XX_SLI_WINDOW_CTL_DEFAULT);
+
+ /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
+ return 0;
+}
+
+static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+ u64 pkt_in_done;
+
+ iq_no += oct->sriov_info.pf_srn;
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = readq(iq->inst_cnt_reg);
+
+ if (oct->msix_on) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+ iq->inst_cnt_reg);
+ } else {
+ /* Clear the count by writing back what we read, but don't
+ * enable interrupts
+ */
+ writeq(pkt_in_done, iq->inst_cnt_reg);
+ }
+
+ iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 reg_val;
+ struct octeon_droq *droq = oct->droq[oq_no];
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 time_threshold;
+ u64 cnt_threshold;
+
+ oq_no += oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ if (!oct->msix_on) {
+ /* Enable this output queue to generate Packet Timer Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+
+ /* Enable this output queue to generate Packet Count Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+ } else {
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+ cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
+ ((time_threshold << 32 | cnt_threshold)));
+ }
+}
+
+static int cn23xx_enable_io_queues(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u32 srn, ern, q_no;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set the corresponding IQ IS_64B bit */
+ if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
+ /* IOQs are in reset by default in PEM2 mode,
+ * clearing reset bit
+ */
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val &
+ CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ reg_val = octeon_read_csr64(
+ oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ }
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+ }
+ for (q_no = srn; q_no < ern; q_no++) {
+ u32 reg_val;
+ /* set the corresponding OQ ENB bit */
+ if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+ return 0;
+}
+
+static void cn23xx_disable_io_queues(struct octeon_device *oct)
+{
+ int q_no, loop;
+ u64 d64;
+ u32 d32;
+ u32 srn, ern;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ /*** Disable Input Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* start the Reset for a particular ring */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ WRITE_ONCE(d64, READ_ONCE(d64) &
+ (~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
+ WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(d64));
+
+ /* Wait until hardware indicates that the particular IQ
+ * is out of reset.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Input Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
+ while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+ }
+
+ /*** Disable Output Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* Wait until hardware indicates that the particular queue
+ * is out of reset. Note that SLI_PKT_IOQ_RING_RST is
+ * common to both IQs and OQs.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Output Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+ while (octeon_read_csr64(oct,
+ CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ WRITE_ONCE(d32, octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
+ READ_ONCE(d32));
+ }
+}
+
+static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ u64 pkts_sent;
+ u64 ret = 0;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+
+ if (!droq) {
+ dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
+ oct->pf_num, ioq_vector->ioq_num);
+ return 0;
+ }
+
+ pkts_sent = readq(droq->pkts_sent_reg);
+
+ /* If our device has interrupted, then proceed. Also check
+ * for all F's, which indicates the interrupt was triggered on an
+ * error and the PCI read failed.
+ */
+ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+ return ret;
+
+ /* Write the count reg in sli_pkt_cnts to clear these interrupts. */
+ if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+ (pkts_sent & CN23XX_INTR_PI_INT)) {
+ if (pkts_sent & CN23XX_INTR_PO_INT)
+ ret |= MSIX_PO_INT;
+ }
+
+ if (pkts_sent & CN23XX_INTR_PI_INT)
+ /* We will clear the count when we update the read_index. */
+ ret |= MSIX_PI_INT;
+
+ /* We never need to handle the MSI-X mailbox interrupt for the PF;
+ * it arrives on the last MSI-X vector.
+ */
+ return ret;
+}
+
+static irqreturn_t cn23xx_interrupt_handler(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr64;
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+ intr64 = readq(cn23xx->intr_sum_reg64);
+
+ oct->int_status = 0;
+
+ if (intr64 & CN23XX_INTR_ERR)
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
+ oct->octeon_id, CVM_CAST64(intr64));
+
+ if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
+ if (intr64 & CN23XX_INTR_PKT_DATA)
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & (CN23XX_INTR_DMA0_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+ if (intr64 & (CN23XX_INTR_DMA1_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn23xx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid)
+{
+ u64 bar1;
+ u64 reg_adr;
+
+ if (!valid) {
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ return;
+ }
+
+ /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
+ * bits <41:22> of the Core Addr
+ */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+
+ WRITE_ONCE(bar1, lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
+}
+
+static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
+{
+ lio_pci_writeq(oct, mask,
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+/* always call with lock held */
+static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 new_idx;
+ u32 last_done;
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index. The iq->reset_instr_cnt is always zero for
+ * cn23xx, so no extra adjustments are needed.
+ */
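+ /* Illustrative wrap-around (hypothetical ring with
+ * max_count == 1024): octeon_read_index == 1020 and
+ * last_done == 10 give (1020 + 10) % 1024 == 6.
+ */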
+ new_idx = (iq->octeon_read_index +
+ (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+
+ return new_idx;
+}
+
+static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Divide the single write to multiple writes based on the flag. */
+ /* Enable Interrupt */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val |= CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+}
+
+static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Disable Interrupts */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(0, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val &= ~CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+}
+
+static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
+{
+ oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
+ oct->pcie_port);
+}
+
+static void cn23xx_get_pf_num(struct octeon_device *oct)
+{
+ u32 fdl_bit = 0;
+
+ /** Read Function Dependency Link reg to get the function number */
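+ /* Illustrative extraction: fdl_bit == 0x00010000 yields
+ * pf_num == (0x00010000 >> 16) & 0xFF == 1.
+ */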
+ pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit);
+ oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+ CN23XX_PCIE_SRIOV_FDL_MASK);
+}
+
+static void cn23xx_setup_reg_address(struct octeon_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ oct->reg_list.pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
+ oct->reg_list.pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
+ oct->reg_list.pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
+
+ oct->reg_list.pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
+ oct->reg_list.pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
+ oct->reg_list.pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
+
+ oct->reg_list.pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
+ oct->reg_list.pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
+ oct->reg_list.pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
+
+ oct->reg_list.pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
+ oct->reg_list.pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
+ oct->reg_list.pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
+
+ cn23xx_get_pcie_qlmport(oct);
+
+ cn23xx->intr_mask64 = CN23XX_INTR_MASK;
+ if (!oct->msix_on)
+ cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
+ cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
+
+ cn23xx->intr_sum_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ cn23xx->intr_enb_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+}
+
+static int cn23xx_sriov_config(struct octeon_device *oct)
+{
+ u32 total_rings;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ /* num_vfs is already filled for us */
+ u32 pf_srn, num_pf_rings;
+
+ cn23xx->conf =
+ (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
+ switch (oct->rev_id) {
+ case OCTEON_CN23XX_REV_1_0:
+ total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+ break;
+ case OCTEON_CN23XX_REV_1_1:
+ total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ break;
+ default:
+ total_rings = CN23XX_MAX_RINGS_PER_PF;
+ break;
+ }
+ if (!oct->sriov_info.num_pf_rings) {
+ if (total_rings > num_present_cpus())
+ num_pf_rings = num_present_cpus();
+ else
+ num_pf_rings = total_rings;
+ } else {
+ num_pf_rings = oct->sriov_info.num_pf_rings;
+
+ if (num_pf_rings > total_rings) {
+ dev_warn(&oct->pci_dev->dev,
+ "num_queues_per_pf requested %u is more than available rings. Reducing to %u\n",
+ num_pf_rings, total_rings);
+ num_pf_rings = total_rings;
+ }
+ }
+
+ total_rings = num_pf_rings;
+ /* the first ring of the pf */
+ pf_srn = total_rings - num_pf_rings;
+
+ oct->sriov_info.trs = total_rings;
+ oct->sriov_info.pf_srn = pf_srn;
+ oct->sriov_info.num_pf_rings = num_pf_rings;
+ dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n",
+ oct->sriov_info.trs, oct->sriov_info.pf_srn,
+ oct->sriov_info.num_pf_rings);
+ return 0;
+}
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
+{
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ cn23xx_get_pf_num(oct);
+
+ if (cn23xx_sriov_config(oct)) {
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
+
+ oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
+ oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
+ oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
+
+ oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
+ oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
+ oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
+ oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
+
+ oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
+ oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
+
+ cn23xx_setup_reg_address(oct);
+
+ oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx)
+{
+ if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf23xx),
+ CN23XX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf23xx),
+ CN23XX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
+ !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+void cn23xx_dump_iq_regs(struct octeon_device *oct)
+{
+ u32 regval, q_no;
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_DOORBELL(0),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_DOORBELL(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_BASE_ADDR64(0),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_SIZE(0),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_CTL_STATUS,
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
+
+ for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
+ q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ CVM_CAST64(octeon_read_csr64
+ (oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
+ }
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
+ CN23XX_CONFIG_PCIE_DEVCTL, regval);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
+ oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ CVM_CAST64(lio_pci_readq(
+ oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
+ oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+}
+
+int cn23xx_fw_loaded(struct octeon_device *oct)
+{
+ u64 val;
+
+ val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1);
+ return (val >> 1) & 1ULL;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
new file mode 100644
index 000000000000..21b5c9051967
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -0,0 +1,59 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_device.h
+ * \brief Host Driver: Routines that perform CN23XX specific operations.
+ */
+
+#ifndef __CN23XX_PF_DEVICE_H__
+#define __CN23XX_PF_DEVICE_H__
+
+#include "cn23xx_pf_regs.h"
+
+/* Register address and configuration for CN23XX devices.
+ * If device-specific changes need to be made, add a struct to include
+ * device-specific fields, as shown in the commented section.
+ */
+struct octeon_cn23xx_pf {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+};
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx);
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+
+int cn23xx_fw_loaded(struct octeon_device *oct);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
new file mode 100644
index 000000000000..03d79d95ab75
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -0,0 +1,604 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX devices.
+ */
+
+#ifndef __CN23XX_PF_REGS_H__
+#define __CN23XX_PF_REGS_H__
+
+#define CN23XX_CONFIG_VENDOR_ID 0x00
+#define CN23XX_CONFIG_DEVICE_ID 0x02
+
+#define CN23XX_CONFIG_XPANSION_BAR 0x38
+
+#define CN23XX_CONFIG_MSIX_CAP 0x50
+#define CN23XX_CONFIG_MSIX_LMSI 0x54
+#define CN23XX_CONFIG_MSIX_UMSI 0x58
+#define CN23XX_CONFIG_MSIX_MSIMD 0x5C
+#define CN23XX_CONFIG_MSIX_MSIMM 0x60
+#define CN23XX_CONFIG_MSIX_MSIMP 0x64
+
+#define CN23XX_CONFIG_PCIE_CAP 0x70
+#define CN23XX_CONFIG_PCIE_DEVCAP 0x74
+#define CN23XX_CONFIG_PCIE_DEVCTL 0x78
+#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C
+#define CN23XX_CONFIG_PCIE_LINKCTL 0x80
+#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84
+#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88
+#define CN23XX_CONFIG_PCIE_DEVCTL2 0x98
+#define CN23XX_CONFIG_PCIE_LINKCTL2 0xA0
+#define CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK 0x108
+#define CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS 0x110
+#define CN23XX_CONFIG_PCIE_DEVCTL_MASK 0x00040000
+
+#define CN23XX_PCIE_SRIOV_FDL 0x188
+#define CN23XX_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN23XX_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
+
+#define CN23XX_CONFIG_SRIOV_VFDEVID 0x190
+
+#define CN23XX_CONFIG_SRIOV_BAR_START 0x19C
+#define CN23XX_CONFIG_SRIOV_BARX(i) \
+ (CN23XX_CONFIG_SRIOV_BAR_START + (i * 4))
+#define CN23XX_CONFIG_SRIOV_BAR_PF 0x08
+#define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04
+#define CN23XX_CONFIG_SRIOV_BAR_IO 0x01
+
+/* ############## BAR0 Registers ################ */
+
+#define CN23XX_SLI_CTL_PORT_START 0x286E0
+#define CN23XX_PORT_OFFSET 0x10
+
+#define CN23XX_SLI_CTL_PORT(p) \
+ (CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET))
+
+/* 2 scratch registers (64-bit) */
+#define CN23XX_SLI_WINDOW_CTL 0x282E0
+#define CN23XX_SLI_SCRATCH1 0x283C0
+#define CN23XX_SLI_SCRATCH2 0x283D0
+#define CN23XX_SLI_WINDOW_CTL_DEFAULT 0x200000ULL
+
+/* 1 register (64-bit) - SLI_CTL_STATUS */
+#define CN23XX_SLI_CTL_STATUS 0x28570
+
+/* SLI Packet Input Jabber Register (64-bit register).
+ * <31:0> holds the byte count limiting the size of packets
+ * allowed for SLI inbound packets.
+ * The default value is 0xFA00 (= 64000).
+ */
+#define CN23XX_SLI_PKT_IN_JABBER 0x29170
+/* The input jabber is used to determine the TSO max size.
+ * Due to a H/W limitation, this needs to be reduced to 60000
+ * in order to do H/W TSO and avoid the WQE malformation
+ * (PKO_BUG_24989_WQE_LEN).
+ */
+#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
+
+#define CN23XX_WIN_WR_ADDR_LO 0x20000
+#define CN23XX_WIN_WR_ADDR_HI 0x20004
+#define CN23XX_WIN_WR_ADDR64 CN23XX_WIN_WR_ADDR_LO
+
+#define CN23XX_WIN_RD_ADDR_LO 0x20010
+#define CN23XX_WIN_RD_ADDR_HI 0x20014
+#define CN23XX_WIN_RD_ADDR64 CN23XX_WIN_RD_ADDR_LO
+
+#define CN23XX_WIN_WR_DATA_LO 0x20020
+#define CN23XX_WIN_WR_DATA_HI 0x20024
+#define CN23XX_WIN_WR_DATA64 CN23XX_WIN_WR_DATA_LO
+
+#define CN23XX_WIN_RD_DATA_LO 0x20040
+#define CN23XX_WIN_RD_DATA_HI 0x20044
+#define CN23XX_WIN_RD_DATA64 CN23XX_WIN_RD_DATA_LO
+
+#define CN23XX_WIN_WR_MASK_LO 0x20030
+#define CN23XX_WIN_WR_MASK_HI 0x20034
+#define CN23XX_WIN_WR_MASK_REG CN23XX_WIN_WR_MASK_LO
+#define CN23XX_SLI_MAC_CREDIT_CNT 0x23D70
+
+/* 4 registers (64-bit) for mapping IOQs to MACs(PEMs)-
+ * SLI_PKT_MAC(0..3)_PF(0..1)_RINFO
+ */
+#define CN23XX_SLI_PKT_MAC_RINFO_START64 0x29030
+
+/* 1 register (64-bit) to determine whether IOQs are in reset. */
+#define CN23XX_SLI_PKT_IOQ_RING_RST 0x291E0
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_MAC_RINFO_OFFSET 0x20
+#define CN23XX_PF_RINFO_OFFSET 0x10
+
+#define CN23XX_SLI_PKT_MAC_RINFO64(mac, pf) \
+ (CN23XX_SLI_PKT_MAC_RINFO_START64 + \
+ ((mac) * CN23XX_MAC_RINFO_OFFSET) + \
+ ((pf) * CN23XX_PF_RINFO_OFFSET))
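+/* Illustrative expansion of the macro above:
+ * CN23XX_SLI_PKT_MAC_RINFO64(1, 1) == 0x29030 + 0x20 + 0x10 == 0x29060.
+ */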
+
+/** mask for total rings, setting TRS to base */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS BIT_ULL(16)
+/** mask for starting ring number: setting SRN <6:0> = 0x7F */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN (0x7F)
+
+/* Starting bit of the TRS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS 16
+/* Starting bit of SRN field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN_BIT_POS 0
+/* Starting bit of RPVF field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS 32
+/* Starting bit of NVFS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS 48
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN23XX_SLI_IQ_INSTR_COUNT_START64 0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN23XX_SLI_IQ_BASE_ADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_IQ_DOORBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_IQ_SIZE_START 0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_IQ_PKT_CONTROL_START64 0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
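+/* Illustrative expansion: CN23XX_SLI_IQ_DOORBELL(2)
+ * == 0x10020 + 2 * 0x20000 == 0x50020; each IQ's registers live in
+ * their own 128 KB window.
+ */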
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
+#define CN23XX_PKT_INPUT_CTL_RST BIT(23)
+#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
+#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1)
+
+/** Rings per Virtual Function **/
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F)
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48)
+/** These bits[47:44] select the Physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7)
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
+/** These bits[43:32] select the function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF)
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
+#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
+#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
+#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62)
+#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_OQ_PKT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN23XX_SLI_OQ0_BUFF_INFO_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN23XX_SLI_OQ_BASE_ADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN23XX_SLI_OQ_PKT_CREDITS_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_OQ_SIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN23XX_SLI_OQ_PKT_SENT_START 0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* 1 (64-bit register) for Output Queue backpressure across all rings. */
+#define CN23XX_SLI_OQ_WMARK 0x29180
+
+/* Global pkt control register */
+#define CN23XX_SLI_GBL_CONTROL 0x29210
+
+/* Backpressure enable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1S 0x29260
+
+/* Backpressure enable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1S 0x29270
+
+/* Backpressure disable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1C 0x29280
+
+/* Backpressure disable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1C 0x29290
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately within INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET) + 4)
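
[Editor's note] The CNT and TIME macros above address the same 64-bit INT_LEVELS register: the packet-count threshold sits in bits 31:0 and the time threshold in bits 63:32, hence the +4 byte offset for a separate 32-bit TIME access. A hedged sketch (helper name hypothetical) of programming both thresholds in one 64-bit write, the same pattern the ethtool changes later in this patch use:

    static inline void cn23xx_set_oq_int_levels(struct octeon_device *oct,
                                                u32 oq, u32 cnt, u32 time)
    {
            /* time threshold in the upper 32 bits, packet count in the lower */
            octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq),
                               ((u64)time << 32) | cnt);
    }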
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
+#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
+#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
+#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define CN23XX_SLI_PKT_MBOX_INT_START 0x10210
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_SLI_MAC_PF_MBOX_INT_START 0x27380
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_MBOX_INT(mac, pf) \
+ (CN23XX_SLI_MAC_PF_MBOX_INT_START + \
+ ((mac) * CN23XX_MAC_INT_OFFSET + \
+ (pf) * CN23XX_PF_INT_OFFSET))
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN23XX_DMA_CNT_START 0x28400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values */
+/* SLI_DMA_0_TIM */
+#define CN23XX_DMA_TIM_START 0x28420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN23XX_DMA_INT_LEVEL_START 0x283E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN23XX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN23XX_DMA_CNT(dq) \
+ (CN23XX_DMA_CNT_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_PKT_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIME_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + 4 + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIM(dq) \
+ (CN23XX_DMA_TIM_START + ((dq) * CN23XX_DMA_OFFSET))
+
+/*######################## MSIX TABLE #########################*/
+
+#define CN23XX_MSIX_TABLE_ADDR_START 0x0
+#define CN23XX_MSIX_TABLE_DATA_START 0x8
+
+#define CN23XX_MSIX_TABLE_SIZE 0x10
+#define CN23XX_MSIX_TABLE_ENTRIES 0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx) \
+ (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx) \
+ (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
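
[Editor's note] Illustration only: each MSI-X table entry is 0x10 bytes, with the address qword at offset 0 and the data qword at offset 8, so for entry 3 the macros above evaluate to:

    /* CN23XX_MSIX_TABLE_ADDR(3) == 0x0 + 3 * 0x10 == 0x30 */
    /* CN23XX_MSIX_TABLE_DATA(3) == 0x8 + 3 * 0x10 == 0x38 */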
+
+/*######################## INTERRUPTS #########################*/
+#define CN23XX_MAC_INT_OFFSET 0x20
+#define CN23XX_PF_INT_OFFSET 0x10
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN23XX_SLI_INT_SUM64 0x27000
+
+/* 4 registers (64-bit) for Interrupt Enable for each Port */
+#define CN23XX_SLI_INT_ENB64 0x27080
+
+#define CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf) \
+ (CN23XX_SLI_INT_SUM64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf) \
+ (CN23XX_SLI_INT_ENB64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */
+#define CN23XX_SLI_PKT_CNT_INT 0x29130
+
+/* 1 register (64-bit) to indicate which Output Queue reached time threshold */
+#define CN23XX_SLI_PKT_TIME_INT 0x29140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN23XX_INTR_PO_INT BIT_ULL(63)
+#define CN23XX_INTR_PI_INT BIT_ULL(62)
+#define CN23XX_INTR_MBOX_INT BIT_ULL(61)
+#define CN23XX_INTR_RESEND BIT_ULL(60)
+
+#define CN23XX_INTR_CINT_ENB BIT_ULL(48)
+#define CN23XX_INTR_MBOX_ENB BIT(0)
+
+#define CN23XX_INTR_RML_TIMEOUT_ERR (1)
+
+#define CN23XX_INTR_MIO_INT BIT(1)
+
+#define CN23XX_INTR_RESERVED1 (3 << 2)
+
+#define CN23XX_INTR_PKT_COUNT BIT(4)
+#define CN23XX_INTR_PKT_TIME BIT(5)
+
+#define CN23XX_INTR_RESERVED2 (3 << 6)
+
+#define CN23XX_INTR_M0UPB0_ERR BIT(8)
+#define CN23XX_INTR_M0UPWI_ERR BIT(9)
+#define CN23XX_INTR_M0UNB0_ERR BIT(10)
+#define CN23XX_INTR_M0UNWI_ERR BIT(11)
+
+#define CN23XX_INTR_RESERVED3 (0xFFFFFULL << 12)
+
+#define CN23XX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN23XX_INTR_DMA1_FORCE BIT_ULL(33)
+
+#define CN23XX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN23XX_INTR_DMA1_COUNT BIT_ULL(35)
+
+#define CN23XX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN23XX_INTR_DMA1_TIME BIT_ULL(37)
+
+#define CN23XX_INTR_RESERVED4 (0x7FFFFULL << 38)
+
+#define CN23XX_INTR_VF_MBOX BIT_ULL(57)
+#define CN23XX_INTR_DMAVF_ERR BIT_ULL(58)
+#define CN23XX_INTR_DMAPF_ERR BIT_ULL(59)
+
+#define CN23XX_INTR_PKTVF_ERR BIT_ULL(60)
+#define CN23XX_INTR_PKTPF_ERR BIT_ULL(61)
+#define CN23XX_INTR_PPVF_ERR BIT_ULL(62)
+#define CN23XX_INTR_PPPF_ERR BIT_ULL(63)
+
+#define CN23XX_INTR_DMA0_DATA (CN23XX_INTR_DMA0_TIME)
+#define CN23XX_INTR_DMA1_DATA (CN23XX_INTR_DMA1_TIME)
+
+#define CN23XX_INTR_DMA_DATA \
+ (CN23XX_INTR_DMA0_DATA | CN23XX_INTR_DMA1_DATA)
+
+/* By default only TIME based */
+#define CN23XX_INTR_PKT_DATA (CN23XX_INTR_PKT_TIME)
+/* For both COUNT and TIME based */
+/* #define CN23XX_INTR_PKT_DATA \
+ * (CN23XX_INTR_PKT_COUNT | CN23XX_INTR_PKT_TIME)
+ */
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN23XX_INTR_PCIE_DATA \
+ (CN23XX_INTR_DMA_DATA | CN23XX_INTR_PKT_DATA)
+
+/* Sum of interrupts for error events */
+#define CN23XX_INTR_ERR \
+ (CN23XX_INTR_M0UPB0_ERR | \
+ CN23XX_INTR_M0UPWI_ERR | \
+ CN23XX_INTR_M0UNB0_ERR | \
+ CN23XX_INTR_M0UNWI_ERR | \
+ CN23XX_INTR_DMAVF_ERR | \
+ CN23XX_INTR_DMAPF_ERR | \
+ CN23XX_INTR_PKTPF_ERR | \
+ CN23XX_INTR_PPPF_ERR | \
+ CN23XX_INTR_PPVF_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN23XX_INTR_MASK \
+ (CN23XX_INTR_DMA_DATA | \
+ CN23XX_INTR_DMA0_FORCE | \
+ CN23XX_INTR_DMA1_FORCE | \
+ CN23XX_INTR_MIO_INT | \
+ CN23XX_INTR_ERR)
+
+/* 4 registers (64-bit) */
+#define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80
+#define CN23XX_SLI_S2M_PORTX_CTL(port) \
+ (CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10))
+
+#define CN23XX_SLI_MAC_NUMBER 0x20050
+
+/** PEM(0..3)_BAR1_INDEX(0..15) address is defined as
+ * addr = (0x00011800C0000100 | port << 24 | idx << 3)
+ * Here, port is PEM(0..3) and idx is INDEX(0..15)
+ */
+#define CN23XX_PEM_BAR1_INDEX_START 0x00011800C0000100ULL
+#define CN23XX_PEM_OFFSET 24
+#define CN23XX_BAR1_INDEX_OFFSET 3
+
+#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
+ (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
+ ((idx) << CN23XX_BAR1_INDEX_OFFSET))
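
[Editor's note] A worked example (values illustrative) of the BAR1 index address computation above, for PEM port 2 and index 5:

    /* CN23XX_PEM_BAR1_INDEX_REG(2, 5)
     *   = 0x00011800C0000100 + (2 << 24) + (5 << 3)
     *   = 0x00011800C2000128
     */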
+
+/*############################ DPI #########################*/
+
+/* 1 register (64-bit) - provides DMA Enable */
+#define CN23XX_DPI_CTL 0x0001df0000000040ULL
+
+/* 1 register (64-bit) - Controls the DMA IO Operation */
+#define CN23XX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+/* 1 register (64-bit) - Provides DMA Instruction Queue Enable */
+#define CN23XX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RSP
+ * Indicates which Instruction Queue received an error response from
+ * the IO sub-system
+ */
+#define CN23XX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RST
+ * Indicates which Instruction Queue dropped an Instruction
+ */
+#define CN23XX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+/* 6 registers (64-bit) - DPI_DMA_ENG(0..5)_EN
+ * Provides DMA Engine Queue Enable
+ */
+#define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8))
+
+/* 8 registers (64-bit) - DPI_DMA(0..7)_REQQ_CTL
+ * Provides control bits for transactions on 8 Queues
+ */
+#define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL
+#define CN23XX_DPI_DMA_REQQ_CTL(q_no) \
+ (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
+
+/* 6 registers (64-bit) - DPI_ENG(0..5)_BUF
+ * Provides DMA Engine FIFO (Queue) Size
+ */
+#define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+#define CN23XX_DPI_DMA_ENG_BUF(eng) \
+ (CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8))
+
+/* 4 registers (64-bit) */
+#define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL
+#define CN23XX_DPI_SLI_PRTX_CFG(port) \
+ (CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8))
+
+/* Masks for DPI_DMA_CONTROL Register */
+#define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN23XX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN23XX_DPI_DMA_ENB (0x0FULL << 48)
+/* Set the DMA Control to update the packet count (rather than the byte
+ * count) sent by DMA when Interrupt Coalescing (CA mode) is used
+ */
+#define CN23XX_DPI_DMA_O_ADD1 BIT(19)
+/* Select 64-bit Byte Swap Mode */
+#define CN23XX_DPI_DMA_O_ES BIT(15)
+#define CN23XX_DPI_DMA_O_MODE BIT(14)
+
+#define CN23XX_DPI_DMA_CTL_MASK \
+ (CN23XX_DPI_DMA_COMMIT_MODE | \
+ CN23XX_DPI_DMA_PKT_EN | \
+ CN23XX_DPI_DMA_O_ES | \
+ CN23XX_DPI_DMA_O_MODE)
+
+/*############################ RST #########################*/
+
+#define CN23XX_RST_BOOT 0x0001180006001600ULL
+#define CN23XX_RST_SOFT_RST 0x0001180006001680ULL
+
+#define CN23XX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN23XX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index c03d37016a48..e779af88621b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -338,7 +338,7 @@ void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
u32 mask;
@@ -353,6 +353,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
mask |= oct->io_qmask.oq;
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ return 0;
}
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
@@ -418,36 +420,6 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
-{
- int i;
-
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
- continue;
- oct->fn_list.setup_iq_regs(oct, i);
- }
-
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
- continue;
- oct->fn_list.setup_oq_regs(oct, i);
- }
-
- oct->fn_list.setup_device_regs(oct);
-
- oct->fn_list.enable_interrupt(oct->chip);
-
- oct->fn_list.enable_io_queues(oct);
-
- /* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
- continue;
- writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
- }
-}
-
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
u64 core_addr,
@@ -507,18 +479,20 @@ lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
return new_idx;
}
-void lio_cn6xxx_enable_interrupt(void *chip)
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
{
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
/* Enable Interrupt */
writeq(mask, cn6xxx->intr_enb_reg64);
}
-void lio_cn6xxx_disable_interrupt(void *chip)
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
{
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
/* Disable Interrupts */
writeq(0, cn6xxx->intr_enb_reg64);
@@ -714,7 +688,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
- oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 28c47224221a..a40a91394079 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -80,18 +80,17 @@ void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
u32 idx, int valid);
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
-void lio_cn6xxx_enable_interrupt(void *chip);
-void lio_cn6xxx_disable_interrupt(void *chip);
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 29755bc68f12..dbf3566ead53 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -148,7 +148,6 @@ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
- oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
new file mode 100644
index 000000000000..201eddb3013a
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -0,0 +1,266 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+/* TODO: use some other #define to convey that IQs are not tied to
+ * netdevs and can take traffic from different netdevs; hence BQL
+ * reporting is done per packet rather than in bulk. The use of
+ * NO_NAPI in txq completion is a little confusing.
+ */
+ *bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+}
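
[Editor's note] The two helpers above implement the BQL contract: every byte reported via netdev_tx_sent_queue() at transmit time must eventually be matched by netdev_tx_completed_queue(), otherwise the stack throttles the queue indefinitely. A hedged sketch (function name hypothetical) of the completion side wired through these helpers:

    static void example_tx_done(void *txq, void *buf, int reqtype)
    {
            unsigned int pkts_compl = 0, bytes_compl = 0;

            /* tally the skb behind this request, then report to BQL */
            octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
                                                 &bytes_compl);
            octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
    }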
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ netif_info(lio, probe, lio->netdev,
+ "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ mac[0], mac[1],
+ mac[2], mac[3],
+ mac[4], mac[5]);
+ break;
+
+ case OCTNET_CMD_CHANGE_MTU:
+ /* If command is successful, change the MTU. */
+ netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
+ netdev->mtu, nctrl->ncmd.s.param1);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu,
+ nctrl->ncmd.s.param1);
+ netdev->mtu = nctrl->ncmd.s.param1;
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_ID_ACTIVE:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ENABLE_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d ADDED\n",
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d DELETED\n",
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 289eb8907922..f163e0abbeb2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -32,6 +32,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
static int octnet_get_link_stats(struct net_device *netdev);
@@ -75,6 +76,7 @@ enum {
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
#define OCT_ETHTOOL_REGSVER 1
/* statistics of PF */
@@ -188,6 +190,10 @@ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
"buffer_alloc_failure",
};
+/* LiquidIO driver private flags */
+static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
+};
+
#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2
@@ -259,6 +265,13 @@ lio_ethtool_get_channels(struct net_device *dev,
max_tx = CFG_GET_IQ_MAX_Q(conf6x);
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf23);
+ max_tx = CFG_GET_IQ_MAX_Q(conf23);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
}
channel->max_rx = max_rx;
@@ -290,18 +303,16 @@ lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_board_info *board_info;
- int len;
- if (eeprom->offset != 0)
+ if (eeprom->offset)
return -EINVAL;
eeprom->magic = oct_dev->pci_dev->vendor;
board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
- len =
- sprintf((char *)bytes,
- "boardname:%s serialnum:%s maj:%lld min:%lld\n",
- board_info->name, board_info->serial_number,
- board_info->major, board_info->minor);
+ sprintf((char *)bytes,
+ "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
return 0;
}
@@ -333,6 +344,32 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
return 0;
}
+static int octnet_id_active(struct net_device *netdev, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
+ nctrl.ncmd.s.param1 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Callback for when mdio command response arrives
*/
static void octnet_mdio_resp_callback(struct octeon_device *oct,
@@ -406,7 +443,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
- retval = -EBUSY;
+ retval = -EBUSY;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived
@@ -476,6 +513,11 @@ static int lio_set_phys_id(struct net_device *netdev,
&value);
if (ret)
return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+
+ /* returns 0 since updates are asynchronous */
+ return 0;
} else {
return -EINVAL;
}
@@ -521,7 +563,10 @@ static int lio_set_phys_id(struct net_device *netdev,
&lio->phy_beacon_val);
if (ret)
return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ return 0;
} else {
return -EINVAL;
}
@@ -550,6 +595,13 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
}
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
@@ -610,6 +662,69 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = oct->rx_pause;
}
+static int
+lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: these drivers do not support any autonegotiation. */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct oct_link_info *linfo = &lio->linfo;
+
+ int ret = 0;
+
+ if (oct->chip_id != OCTEON_CN23XX_PF_VID)
+ return -EINVAL;
+
+ if (linfo->link.s.duplex == 0) {
+ /* no flow control for half duplex */
+ if (pause->rx_pause || pause->tx_pause)
+ return -EINVAL;
+ }
+
+ /* do not support autoneg of link flow control */
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ if (pause->rx_pause) {
+ /* enable rx pause */
+ nctrl.ncmd.s.param1 = 1;
+ } else {
+ /* disable rx pause */
+ nctrl.ncmd.s.param1 = 0;
+ }
+
+ if (pause->tx_pause) {
+ /* enable tx pause */
+ nctrl.ncmd.s.param2 = 1;
+ } else {
+ /* disable tx pause */
+ nctrl.ncmd.s.param2 = 0;
+ }
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
+ return -EINVAL;
+ }
+
+ oct->rx_pause = pause->rx_pause;
+ oct->tx_pause = pause->tx_pause;
+
+ return 0;
+}
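
[Editor's note] Illustration only, not part of the patch: the control command assembled above encodes each pause direction as a one-bit parameter, so enabling RX pause while leaving TX pause off reduces to:

    nctrl.ncmd.s.cmd    = OCTNET_CMD_SET_FLOW_CTL;
    nctrl.ncmd.s.param1 = 1;        /* rx_pause on */
    nctrl.ncmd.s.param2 = 0;        /* tx_pause off */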
+
static void
lio_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats __attribute__((unused)),
@@ -877,6 +992,27 @@ lio_get_ethtool_stats(struct net_device *netdev,
}
}
+static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
+ sprintf(data, "%s", oct_priv_flags_strings[i]);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ break;
+ }
+}
+
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct lio *lio = GET_LIO(netdev);
@@ -916,12 +1052,31 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ lio_get_priv_flags_strings(lio, data);
+ break;
default:
netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
break;
}
}
+static int lio_get_priv_flags_ss_count(struct lio *lio)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return ARRAY_SIZE(oct_priv_flags_strings);
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ return -EOPNOTSUPP;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EOPNOTSUPP;
+ }
+}
+
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
struct lio *lio = GET_LIO(netdev);
@@ -932,6 +1087,8 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
return (ARRAY_SIZE(oct_stats_strings) +
ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ case ETH_SS_PRIV_FLAGS:
+ return lio_get_priv_flags_ss_count(lio);
default:
return -EOPNOTSUPP;
}
@@ -948,6 +1105,16 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intrmod_cfg = &oct->intrmod;
switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ if (!intrmod_cfg->rx_enable) {
+ intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
+ intr_coal->rx_max_coalesced_frames =
+ intrmod_cfg->rx_frames;
+ }
+ if (!intrmod_cfg->tx_enable)
+ intr_coal->tx_max_coalesced_frames =
+ intrmod_cfg->tx_frames;
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX: {
struct octeon_cn6xxx *cn6xxx =
@@ -983,7 +1150,15 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_coalesce_usecs_low =
intrmod_cfg->rx_mintmr_trigger;
intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->rx_mincnt_trigger;
+ intrmod_cfg->rx_mincnt_trigger;
+ }
+ if (OCTEON_CN23XX_PF(oct) &&
+ (intrmod_cfg->tx_enable)) {
+ intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
+ intr_coal->tx_max_coalesced_frames_high =
+ intrmod_cfg->tx_maxcnt_trigger;
+ intr_coal->tx_max_coalesced_frames_low =
+ intrmod_cfg->tx_mincnt_trigger;
}
return 0;
}
@@ -1060,11 +1235,11 @@ static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
u32 status, void *ptr)
{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
- sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
- sc->ctxptr;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp =
+ (struct oct_nic_stats_resp *)sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl =
+ (struct oct_nic_stats_ctrl *)sc->ctxptr;
struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
@@ -1314,14 +1489,35 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
break;
}
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
+ (0x3fffff00000000UL)) |
+ rx_max_coalesced_frames);
+ /* consider setting the resend bit */
+ }
+ oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ break;
+ }
default:
return -EINVAL;
}
return 0;
}
-static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
- *intr_coal)
+static int oct_cfg_rx_intrtime(struct lio *lio,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 time_threshold, rx_coalesce_usecs;
@@ -1346,6 +1542,27 @@ static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
break;
}
+ case OCTEON_CN23XX_PF_VID: {
+ u64 time_threshold;
+ int q_no;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ time_threshold =
+ cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(oct,
+ CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (oct->intrmod.rx_frames |
+ (time_threshold << 32)));
+ /* consider writing to the resend bit here */
+ }
+ oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ break;
+ }
default:
return -EINVAL;
}
@@ -1358,12 +1575,37 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
__attribute__((unused)))
{
struct octeon_device *oct = lio->oct_dev;
+ u32 iq_intr_pkt;
+ void __iomem *inst_cnt_reg;
+ u64 val;
/* Config Cnt based interrupt values */
switch (oct->chip_id) {
case OCTEON_CN68XX:
case OCTEON_CN66XX:
break;
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->tx_max_coalesced_frames)
+ iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ else
+ iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
+ val = readq(inst_cnt_reg);
+ /* clear wmark and count; don't want to write the count back */
+ val = (val & 0xFFFF000000000000ULL) |
+ ((u64)iq_intr_pkt
+ << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
+ writeq(val, inst_cnt_reg);
+ /* consider setting the resend bit */
+ }
+ oct->intrmod.tx_frames = iq_intr_pkt;
+ break;
+ }
default:
return -EINVAL;
}
@@ -1399,6 +1641,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
return -EINVAL;
}
break;
+ case OCTEON_CN23XX_PF_VID:
+ break;
default:
return -EINVAL;
}
@@ -1541,9 +1785,237 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
-static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
+static int lio_get_regs_len(struct net_device *dev)
{
- return OCT_ETHTOOL_REGDUMP_LEN;
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return OCT_ETHTOOL_REGDUMP_LEN_23XX;
+ default:
+ return OCT_ETHTOOL_REGDUMP_LEN;
+ }
+}
+
+static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ u8 pf_num = oct->pf_num;
+ int len = 0;
+ int i;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+
+ /*0x29030 or 0x29040*/
+ reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27080 or 0x27090*/
+ reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27000 or 0x27010*/
+ reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29120*/
+ reg = 0x29120;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27300*/
+ reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
+ oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27200*/
+ reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29130*/
+ reg = CN23XX_SLI_PKT_CNT_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29140*/
+ reg = CN23XX_SLI_PKT_TIME_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29160*/
+ reg = 0x29160;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29180*/
+ reg = CN23XX_SLI_OQ_WMARK;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x291E0*/
+ reg = CN23XX_SLI_PKT_IOQ_RING_RST;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29210*/
+ reg = CN23XX_SLI_GBL_CONTROL;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29220*/
+ reg = 0x29220;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*PF only*/
+ if (pf_num == 0) {
+ /*0x29260*/
+ reg = CN23XX_SLI_OUT_BP_EN_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ } else if (pf_num == 1) {
+ /*0x29270*/
+ reg = CN23XX_SLI_OUT_BP_EN2_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10080*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10090*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_SIZE(i);
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10050*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10070*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100a0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100b0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100c0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x10000*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10010*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
+ i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10020*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_DOORBELL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10030*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_SIZE(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+ }
+
+ return len;
}
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
@@ -1688,6 +2160,10 @@ static void lio_get_regs(struct net_device *dev,
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
+ len += cn23xx_read_csr_reg(regbuf + len, oct);
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
@@ -1729,6 +2205,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
.get_strings = lio_get_strings,
.get_ethtool_stats = lio_get_ethtool_stats,
.get_pauseparam = lio_get_pauseparam,
+ .set_pauseparam = lio_set_pauseparam,
.get_regs_len = lio_get_regs_len,
.get_regs = lio_get_regs,
.get_msglevel = lio_get_msglevel,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 20d6942edf40..afc6f9dc8119 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -21,11 +21,10 @@
**********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
+#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -37,6 +36,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
+#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
@@ -52,11 +52,6 @@ module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
"Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
-static u32 console_bitmask;
-module_param(console_bitmask, int, 0644);
-MODULE_PARM_DESC(console_bitmask,
- "Bitmask indicating which consoles have debug output redirected to syslog.");
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
@@ -102,6 +97,14 @@ struct liquidio_if_cfg_resp {
u64 status;
};
+struct liquidio_rx_ctl_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
@@ -139,7 +142,8 @@ union tx_info {
#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
-#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+#define OCTNIC_GSO_MAX_SIZE \
+ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
/** Structure of a node in list of gather components maintained by
* NIC driver for each network device.
@@ -162,27 +166,6 @@ struct octnic_gather {
u64 sg_dma_ptr;
};
-/** This structure is used by NIC driver to store information required
- * to free the sk_buff when the packet has been fetched by Octeon.
- * Bytes offset below assume worst-case of a 64-bit system.
- */
-struct octnet_buf_free_info {
- /** Bytes 1-8. Pointer to network device private structure. */
- struct lio *lio;
-
- /** Bytes 9-16. Pointer to sk_buff. */
- struct sk_buff *skb;
-
- /** Bytes 17-24. Pointer to gather list. */
- struct octnic_gather *g;
-
- /** Bytes 25-32. Physical address of skb->data or gather list. */
- u64 dptr;
-
- /** Bytes 33-47. Piggybacked soft command, if any */
- struct octeon_soft_command *sc;
-};
-
struct handshake {
struct completion init;
struct completion started;
@@ -198,6 +181,7 @@ struct octeon_device_priv {
};
static int octeon_device_init(struct octeon_device *);
+static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -219,6 +203,20 @@ static void octeon_droq_bh(unsigned long pdev)
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
+ lio_enable_irq(oct->droq[q_no], NULL);
+
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ /* set time and cnt interrupt thresholds for this DROQ
+ * for NAPI
+ */
+ int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
+ 0x5700000040ULL);
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
+ }
}
if (reschedule)
@@ -252,76 +250,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
return pkt_cnt;
}
-void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
- unsigned int bytes_compl)
-{
- struct netdev_queue *netdev_queue = txq;
-
- netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
-}
-
-void octeon_update_tx_completion_counters(void *buf, int reqtype,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb = NULL;
- struct octeon_soft_command *sc;
-
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
-
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
-
- default:
- return;
- }
-
- (*pkts_compl)++;
- *bytes_compl += skb->len;
-}
-
-void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
-{
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb;
- struct octeon_soft_command *sc;
- struct netdev_queue *txq;
-
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
-
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
-
- default:
- return;
- }
-
- txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
- netdev_tx_sent_queue(txq, skb->len);
-}
-
-int octeon_console_debug_enabled(u32 console)
-{
- return (console_bitmask >> (console)) & 0x1;
-}
-
/**
* \brief Forces all IO queues off on a given device
* @param oct Pointer to Octeon device
@@ -441,7 +369,7 @@ static void stop_pci_io(struct octeon_device *oct)
pci_disable_device(oct->pci_dev);
/* Disable interrupts */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
pcierror_quiesce_device(oct);
@@ -570,6 +498,9 @@ static const struct pci_device_id liquidio_pci_tbl[] = {
{ /* 66xx */
PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
},
+ { /* 23xx pf */
+ PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
{
0, 0, 0, 0, 0, 0, 0
}
@@ -587,7 +518,6 @@ static struct pci_driver liquidio_pci_driver = {
.suspend = liquidio_suspend,
.resume = liquidio_resume,
#endif
-
};
/**
@@ -936,6 +866,52 @@ static void print_link_info(struct net_device *netdev)
}
/**
+ * \brief Routine to notify MTU change
+ * @param work work_struct data structure
+ */
+static void octnet_link_status_change(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
+ rtnl_unlock();
+}
+
+/**
+ * \brief Sets up the mtu status change work
+ * @param netdev network device
+ */
+static inline int setup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->link_status_wq.wq = alloc_workqueue("link-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->link_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
+ octnet_link_status_change);
+ lio->link_status_wq.wk.ctxptr = lio;
+
+ return 0;
+}
+
+static inline void cleanup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->link_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
+ destroy_workqueue(lio->link_status_wq.wq);
+ }
+}
+
+/**
* \brief Update link status
* @param netdev network device
* @param ls link status structure
@@ -973,8 +949,6 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
struct lio *lio;
struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
- /*octeon_update_iq_read_idx(oct, iq);*/
-
netdev = oct->props[iq->ifidx].netdev;
/* This is needed because the first IQ does not have
@@ -1002,12 +976,32 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
}
}
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* this will be flushed periodically by check iq db */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+ return 0;
+}
+
/**
* \brief Droq packet processor scheduler
* @param oct octeon device
*/
-static
-void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1032,19 +1026,36 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
}
}
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ u64 ret;
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
/**
* \brief Interrupt handler for octeon
* @param irq unused
* @param dev octeon device
*/
static
-irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+ void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
irqreturn_t ret;
/* Disable our interrupts for the duration of ISR */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
ret = oct->fn_list.process_interrupt_regs(oct);
@@ -1053,7 +1064,7 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
/* Re-enable our interrupts */
if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
- oct->fn_list.enable_interrupt(oct->chip);
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
return ret;
}
@@ -1067,22 +1078,204 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
static int octeon_setup_interrupt(struct octeon_device *oct)
{
int irqret, err;
+ struct msix_entry *msix_entries;
+ int i;
+ int num_ioq_vectors;
+ int num_alloc_ioq_vectors;
- err = pci_enable_msi(oct->pci_dev);
- if (err)
- dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
- err);
- else
- oct->flags |= LIO_FLAG_MSI_ENABLED;
-
- irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
- IRQF_SHARED, "octeon", oct);
- if (irqret) {
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
- dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
- irqret);
- return 1;
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
+ /* one non-IOQ interrupt for handling sli_mac_pf_int_sum */
+ oct->num_msix_irqs += 1;
+
+ oct->msix_entries = kcalloc(
+ oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ return 1;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ /* Assumption is that PF MSI-X vectors start at pf_srn and run
+ * through trs, not from 0. If not, change this code.
+ */
+ for (i = 0; i < oct->num_msix_irqs - 1; i++)
+ msix_entries[i].entry = oct->sriov_info.pf_srn + i;
+ msix_entries[oct->num_msix_irqs - 1].entry =
+ oct->sriov_info.trs;
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+
+ /** For PF, there is one non-ioq interrupt handler */
+ num_ioq_vectors -= 1;
+ irqret = request_irq(msix_entries[num_ioq_vectors].vector,
+ liquidio_legacy_intr_handler, 0, "octeon",
+ oct);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+
+ for (i = 0; i < num_ioq_vectors; i++) {
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ "octeon", &oct->ioq_vector[i]);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ /* Free the non-ioq irq vector here. */
+ free_irq(msix_entries[num_ioq_vectors].vector,
+ oct);
+
+ while (i) {
+ i--;
+ /* clear the affinity mask */
+ irq_set_affinity_hint(
+ msix_entries[i].vector, NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the cpu mask for this msix interrupt vector */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ (&oct->ioq_vector[i].affinity_mask));
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+ oct->octeon_id);
+ } else {
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ irqret = request_irq(oct->pci_dev->irq,
+ liquidio_legacy_intr_handler, IRQF_SHARED,
+ "octeon", oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ return 1;
+ }
+ }
+ return 0;
+}
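
The MSI-X branch above follows the usual bring-up sequence: allocate an exact vector range with pci_enable_msix_range() (minvec == maxvec makes it all-or-nothing), request one handler per vector, and on any request_irq() failure unwind every vector already requested before disabling MSI-X. A minimal sketch of that pattern, assuming a hypothetical my_dev container and handler that are not part of this driver:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Hypothetical per-device container, used only for this sketch. */
struct my_dev {
	struct pci_dev *pdev;
	struct msix_entry *entries;
	int nvec;
};

static irqreturn_t my_handler(int irq, void *arg)
{
	return IRQ_HANDLED;
}

static int my_setup_msix(struct my_dev *d, int nvec)
{
	int i, ret;

	d->entries = kcalloc(nvec, sizeof(*d->entries), GFP_KERNEL);
	if (!d->entries)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		d->entries[i].entry = i;

	/* minvec == maxvec: all-or-nothing allocation, as above */
	ret = pci_enable_msix_range(d->pdev, d->entries, nvec, nvec);
	if (ret < 0)
		goto free_entries;
	d->nvec = nvec;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(d->entries[i].vector, my_handler, 0,
				  "my_dev", d);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* free only the vectors already requested (0 .. i-1) */
	while (i--)
		free_irq(d->entries[i].vector, d);
	pci_disable_msix(d->pdev);
free_entries:
	kfree(d->entries);
	d->entries = NULL;
	return ret;
}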
+
+static int liquidio_watchdog(void *param)
+{
+ u64 wdog;
+ u16 mask_of_stuck_cores = 0;
+ u16 mask_of_crashed_cores = 0;
+ int core_num;
+ u8 core_is_stuck[LIO_MAX_CORES];
+ u8 core_crashed[LIO_MAX_CORES];
+ struct octeon_device *oct = param;
+
+ memset(core_is_stuck, 0, sizeof(core_is_stuck));
+ memset(core_crashed, 0, sizeof(core_crashed));
+
+ while (!kthread_should_stop()) {
+ mask_of_crashed_cores =
+ (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
+
+ for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
+ if (!core_is_stuck[core_num]) {
+ wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
+
+ /* look at watchdog state field */
+ wdog &= CIU3_WDOG_MASK;
+ if (wdog) {
+ /* this watchdog timer has expired */
+ core_is_stuck[core_num] =
+ LIO_MONITOR_WDOG_EXPIRE;
+ mask_of_stuck_cores |= (1 << core_num);
+ }
+ }
+
+ if (!core_crashed[core_num])
+ core_crashed[core_num] =
+ (mask_of_crashed_cores >> core_num) & 1;
+ }
+
+ if (mask_of_stuck_cores) {
+ for (core_num = 0; core_num < LIO_MAX_CORES;
+ core_num++) {
+ if (core_is_stuck[core_num] == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d is stuck!\n",
+ core_num);
+ /* 2 (LIO_MONITOR_CORE_STUCK_MSGD) means we have already
+ * printk'd this error, so don't repeat it
+ */
+ core_is_stuck[core_num] =
+ LIO_MONITOR_CORE_STUCK_MSGD;
+ }
+ }
+ }
+
+ if (mask_of_crashed_cores) {
+ for (core_num = 0; core_num < LIO_MAX_CORES;
+ core_num++) {
+ if (core_crashed[core_num] == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
+ core_num);
+ /* 2 (LIO_MONITOR_CORE_STUCK_MSGD) means we have already
+ * printk'd this error, so don't repeat it
+ */
+ core_crashed[core_num] =
+ LIO_MONITOR_CORE_STUCK_MSGD;
+ }
+ }
+ }
+#ifdef CONFIG_MODULE_UNLOAD
+ if (mask_of_stuck_cores || mask_of_crashed_cores) {
+ /* make module refcount=0 so that rmmod will work */
+ long refcount;
+
+ refcount = module_refcount(THIS_MODULE);
+
+ while (refcount > 0) {
+ module_put(THIS_MODULE);
+ refcount = module_refcount(THIS_MODULE);
+ }
+
+ /* compensate for and withstand an unlikely (but still
+ * possible) race condition
+ */
+ while (refcount < 0) {
+ try_module_get(THIS_MODULE);
+ refcount = module_refcount(THIS_MODULE);
+ }
+ }
+#endif
+ /* sleep for two seconds */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
}
return 0;
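
The watchdog loop is a per-core report-once state machine: a core moves from clean to expired/crashed (LIO_MONITOR_WDOG_EXPIRE), gets reported once, and is then parked in LIO_MONITOR_CORE_STUCK_MSGD so the same error never repeats. A runnable userspace reduction of that logic, with made-up mask values standing in for the CIU3_WDOG and SLI_SCRATCH2 reads:

#include <stdio.h>

#define MAX_CORES      16
#define STATE_OK        0
#define STATE_TRIPPED   1	/* needs one report */
#define STATE_REPORTED  2	/* LIO_MONITOR_CORE_STUCK_MSGD analog */

static unsigned char state[MAX_CORES];

/* Fold a new crashed-core bitmask into per-core state, reporting once. */
static void scan(unsigned short crashed_mask)
{
	for (int core = 0; core < MAX_CORES; core++) {
		if (state[core] == STATE_OK && ((crashed_mask >> core) & 1))
			state[core] = STATE_TRIPPED;
		if (state[core] == STATE_TRIPPED) {
			printf("core %d crashed\n", core);
			state[core] = STATE_REPORTED;
		}
	}
}

int main(void)
{
	scan(0x0005);	/* cores 0 and 2: reported */
	scan(0x0005);	/* same mask again: prints nothing */
	scan(0x0015);	/* core 4 newly crashed: one new line */
	return 0;
}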
@@ -1107,6 +1300,9 @@ liquidio_probe(struct pci_dev *pdev,
return -ENOMEM;
}
+ if (pdev->device == OCTEON_CN23XX_PF_VID)
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
dev_info(&pdev->dev, "Initializing device %x:%x.\n",
(u32)pdev->vendor, (u32)pdev->device);
@@ -1130,6 +1326,30 @@ liquidio_probe(struct pci_dev *pdev,
return -ENOMEM;
}
+ if (OCTEON_CN23XX_PF(oct_dev)) {
+ u64 scratch1;
+ u8 bus, device, function;
+
+ scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
+ if (!(scratch1 & 4ULL)) {
+ /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
+ * the lio watchdog kernel thread is running for this
+ * NIC. Each NIC gets one watchdog kernel thread.
+ */
+ scratch1 |= 4ULL;
+ octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
+ scratch1);
+
+ bus = pdev->bus->number;
+ device = PCI_SLOT(pdev->devfn);
+ function = PCI_FUNC(pdev->devfn);
+ oct_dev->watchdog_task = kthread_create(
+ liquidio_watchdog, oct_dev,
+ "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
+ wake_up_process(oct_dev->watchdog_task);
+ }
+ }
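
kthread_create() returns the new thread in a stopped state, so the probe path must follow it with wake_up_process(); the matching kthread_stop() in liquidio_remove() below is what makes kthread_should_stop() return true inside liquidio_watchdog(). A minimal sketch of that lifecycle, with hypothetical names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int my_thread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* do periodic work here */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2 * HZ);	/* sleep ~2s, as above */
	}
	return 0;
}

/* creation side: the thread sleeps until explicitly woken */
static struct task_struct *my_start(void *priv)
{
	struct task_struct *t;

	t = kthread_create(my_thread_fn, priv, "myworker/%d", 0);
	if (!IS_ERR(t))
		wake_up_process(t);
	return t;
}

/* teardown side: blocks until my_thread_fn() returns */
static void my_stop(struct task_struct *t)
{
	kthread_stop(t);
}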
+
oct_dev->rx_pause = 1;
oct_dev->tx_pause = 1;
@@ -1146,6 +1366,7 @@ liquidio_probe(struct pci_dev *pdev,
static void octeon_destroy_resources(struct octeon_device *oct)
{
int i;
+ struct msix_entry *msix_entries;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1190,21 +1411,40 @@ static void octeon_destroy_resources(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
/* Disable interrupts */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs - 1; i++) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ /* the non-ioq vector's argument is the oct struct */
+ free_irq(msix_entries[i].vector, oct);
- /* Release the interrupt line */
- free_irq(oct->pci_dev->irq, oct);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ } else {
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ }
- /* fallthrough */
+ if (OCTEON_CN23XX_PF(oct))
+ octeon_free_ioq_vector(oct);
+ /* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
mdelay(100);
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
octeon_delete_droq(oct, i);
}
@@ -1226,16 +1466,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_delete_response_list(oct);
/* fallthrough */
- case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
- octeon_free_sc_buffer_pool(oct);
-
- /* fallthrough */
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
octeon_delete_instr_queue(oct, i);
}
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
/* fallthrough */
case OCT_DEV_DISPATCH_INIT_DONE:
@@ -1244,9 +1483,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
-
/* Soft reset the octeon device before exiting */
- oct->fn_list.soft_reset(oct);
+ if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
+ oct->fn_list.soft_reset(oct);
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
@@ -1264,23 +1503,89 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
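
octeon_destroy_resources() works by switching on the last init stage reached and falling through every earlier case, which is why this patch has to swap the OCT_DEV_SC_BUFF_POOL_INIT_DONE and OCT_DEV_INSTR_QUEUE_INIT_DONE cases to mirror the reordered init sequence in octeon_device_init() further down. A toy, compilable illustration of the idiom (stage names invented):

#include <stdio.h>

enum stage { ST_NONE, ST_A_DONE, ST_B_DONE, ST_C_DONE };

static void teardown(enum stage reached)
{
	/* undo stages in reverse order, falling through each case */
	switch (reached) {
	case ST_C_DONE:
		puts("undo C");
		/* fallthrough */
	case ST_B_DONE:
		puts("undo B");
		/* fallthrough */
	case ST_A_DONE:
		puts("undo A");
		/* fallthrough */
	case ST_NONE:
		break;
	}
}

int main(void)
{
	teardown(ST_B_DONE);	/* prints "undo B" then "undo A" */
	return 0;
}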
/**
+ * \brief Callback for rx ctrl
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+static void rx_ctl_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_rx_ctl_context *ctx;
+
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (status)
+ dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
* \brief Send Rx control command
* @param lio per-network private data
* @param start_stop whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
- struct octnic_ctrl_pkt nctrl;
+ struct octeon_soft_command *sc;
+ struct liquidio_rx_ctl_context *ctx;
+ union octnet_cmd *ncmd;
+ int ctx_size = sizeof(struct liquidio_rx_ctl_context);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ int retval;
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ if (oct->props[lio->ifidx].rx_on == start_stop)
+ return;
- nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
- nctrl.ncmd.s.param1 = start_stop;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.netpndev = (u64)lio->netdev;
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 16, ctx_size);
- if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct);
+ init_waitqueue_head(&ctx->wc);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
+ ncmd->s.param1 = start_stop;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ sc->callback = rx_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
+ return;
+ oct->props[lio->ifidx].rx_on = start_stop;
+ }
+
+ octeon_free_soft_command(oct, sc);
}
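
send_rx_ctrl_cmd() now blocks until rx_ctl_callback() above flips ctx->cond; the WRITE_ONCE()/wmb() pair publishes the response before the waiter is woken. A rough userspace analog of that flag-plus-wakeup handshake using pthreads, where the mutex/condvar supplies the ordering the kernel code gets from the barrier:

#include <pthread.h>
#include <stdio.h>

struct completion_ctx {
	pthread_mutex_t lock;
	pthread_cond_t wc;
	int cond;	/* analog of ctx->cond */
};

/* callback side: publish the result, then wake the waiter */
static void complete(struct completion_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->cond = 1;
	pthread_cond_signal(&c->wc);
	pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)
{
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion_ctx c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &c);

	/* sleep_cond() analog: wait until the callback sets the flag */
	pthread_mutex_lock(&c.lock);
	while (!c.cond)
		pthread_cond_wait(&c.wc, &c.lock);
	pthread_mutex_unlock(&c.lock);

	pthread_join(t, NULL);
	puts("response arrived");
	return 0;
}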
/**
@@ -1307,21 +1612,24 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
- send_rx_ctrl_cmd(lio, 0);
-
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
- txqs_stop(netdev);
+ liquidio_stop(netdev);
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
}
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
+ cleanup_link_status_change_wq(netdev);
+
delete_glists(lio);
free_netdev(netdev);
@@ -1374,6 +1682,9 @@ static void liquidio_remove(struct pci_dev *pdev)
dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+ if (oct_dev->watchdog_task)
+ kthread_stop(oct_dev->watchdog_task);
+
if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
liquidio_stop_nic_module(oct_dev);
@@ -1417,6 +1728,12 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
s = "CN66XX";
break;
+ case OCTEON_CN23XX_PCIID_PF:
+ oct->chip_id = OCTEON_CN23XX_PF_VID;
+ ret = setup_cn23xx_octeon_pf_device(oct);
+ s = "CN23XX";
+ break;
+
default:
s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
@@ -1867,7 +2184,7 @@ static void if_cfg_callback(struct octeon_device *oct,
struct liquidio_if_cfg_context *ctx;
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
oct = lio_get_device(ctx->octeon_id);
if (resp->status)
@@ -2060,11 +2377,14 @@ static void napi_schedule_wrapper(void *param)
*/
static void liquidio_napi_drv_callback(void *arg)
{
+ struct octeon_device *oct;
struct octeon_droq *droq = arg;
int this_cpu = smp_processor_id();
- if (droq->cpu_id == this_cpu) {
- napi_schedule(&droq->napi);
+ oct = droq->oct_dev;
+
+ if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
+ napi_schedule_irqoff(&droq->napi);
} else {
struct call_single_data *csd = &droq->csd;
@@ -2173,17 +2493,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
lio->ifidx), NULL);
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
- " %s : Runtime DROQ(RxQ) creation failed.\n",
+ "%s : Runtime DROQ(RxQ) creation failed.\n",
__func__);
return 1;
}
droq = octeon_dev->droq[q_no];
napi = &droq->napi;
- dev_dbg(&octeon_dev->pci_dev->dev,
- "netif_napi_add netdev:%llx oct:%llx\n",
- (u64)netdev,
- (u64)octeon_dev);
+ dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
+ (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
/* designate a CPU for this droq */
@@ -2195,6 +2513,14 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
}
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ /* 23XX PF can receive control messages (via the first PF-owned
+ * droq) from the firmware even if the ethX interface is down,
+ * so poll_mode must stay off for the first droq.
+ */
+ octeon_dev->droq[0]->ops.poll_mode = 0;
+ }
+
/* set up IQs. */
for (q = 0; q < lio->linfo.num_txpciq; q++) {
num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
@@ -2235,7 +2561,7 @@ static void octnet_poll_check_txq_status(struct work_struct *work)
* \brief Sets up the txq poll check
* @param netdev network device
*/
-static inline void setup_tx_poll_fn(struct net_device *netdev)
+static inline int setup_tx_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -2244,21 +2570,24 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
- return;
+ return -1;
}
INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
octnet_poll_check_txq_status);
lio->txq_status_wq.wk.ctxptr = lio;
queue_delayed_work(lio->txq_status_wq.wq,
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+ return 0;
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- destroy_workqueue(lio->txq_status_wq.wq);
+ if (lio->txq_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+ }
}
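
setup_tx_poll_fn()/cleanup_tx_poll_fn() are a standard delayed-workqueue pair, now made failure-aware: setup can report allocation failure and cleanup checks the pointer so it is safe even when setup never ran. The skeleton of that pair, with illustrative names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_poll {
	struct workqueue_struct *wq;
	struct delayed_work work;
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_poll *p = container_of(work, struct my_poll, work.work);

	/* check queue status here, then re-arm */
	queue_delayed_work(p->wq, &p->work, msecs_to_jiffies(1));
}

static int my_poll_start(struct my_poll *p)
{
	p->wq = alloc_workqueue("my_poll_wq", WQ_MEM_RECLAIM, 0);
	if (!p->wq)
		return -ENOMEM;	/* caller can now fail the open path */
	INIT_DELAYED_WORK(&p->work, my_poll_fn);
	queue_delayed_work(p->wq, &p->work, msecs_to_jiffies(1));
	return 0;
}

static void my_poll_stop(struct my_poll *p)
{
	if (!p->wq)	/* tolerate setup never having run */
		return;
	cancel_delayed_work_sync(&p->work);
	destroy_workqueue(p->wq);
	p->wq = NULL;
}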
/**
@@ -2276,24 +2605,34 @@ static int liquidio_open(struct net_device *netdev)
napi_enable(napi);
oct->props[lio->ifidx].napi_enabled = 1;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 1;
}
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- setup_tx_poll_fn(netdev);
-
- start_txq(netdev);
+ /* Ready for link status updates */
+ lio->intf_open = 1;
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ if (setup_tx_poll_fn(netdev))
+ return -1;
+ } else {
+ if (setup_tx_poll_fn(netdev))
+ return -1;
+ }
+
+ start_txq(netdev);
+
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
- /* Ready for link status updates */
- lio->intf_open = 1;
-
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
@@ -2328,7 +2667,12 @@ static int liquidio_stop(struct net_device *netdev)
/* Now it should be safe to tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
- cleanup_tx_poll_fn(netdev);
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+ } else {
+ cleanup_tx_poll_fn(netdev);
+ }
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
@@ -2340,143 +2684,6 @@ static int liquidio_stop(struct net_device *netdev)
return 0;
}
-void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
-{
- struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
- struct net_device *netdev = (struct net_device *)nctrl->netpndev;
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- u8 *mac;
-
- switch (nctrl->ncmd.s.cmd) {
- case OCTNET_CMD_CHANGE_DEVFLAGS:
- case OCTNET_CMD_SET_MULTI_LIST:
- break;
-
- case OCTNET_CMD_CHANGE_MACADDR:
- mac = ((u8 *)&nctrl->udd[0]) + 2;
- netif_info(lio, probe, lio->netdev,
- "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- "MACAddr changed to", mac[0], mac[1],
- mac[2], mac[3], mac[4], mac[5]);
- break;
-
- case OCTNET_CMD_CHANGE_MTU:
- /* If command is successful, change the MTU. */
- netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
- netdev->mtu, nctrl->ncmd.s.param1);
- dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
- netdev->name, netdev->mtu,
- nctrl->ncmd.s.param1);
- rtnl_lock();
- netdev->mtu = nctrl->ncmd.s.param1;
- call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
- rtnl_unlock();
- break;
-
- case OCTNET_CMD_GPIO_ACCESS:
- netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
-
- break;
-
- case OCTNET_CMD_LRO_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
-
- case OCTNET_CMD_LRO_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_VERBOSE_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
-
- case OCTNET_CMD_VERBOSE_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_ENABLE_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_ADD_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
-
- case OCTNET_CMD_DEL_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
-
- case OCTNET_CMD_SET_SETTINGS:
- dev_info(&oct->pci_dev->dev, "%s settings changed\n",
- netdev->name);
-
- break;
- /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_RX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s RX Checksum Offload Enabled\n",
- netdev->name);
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_RXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s RX Checksum Offload Disabled\n",
- netdev->name);
- }
- break;
-
- /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_TX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s TX Checksum Offload Enabled\n",
- netdev->name);
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_TXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s TX Checksum Offload Disabled\n",
- netdev->name);
- }
- break;
-
- /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_VXLAN_PORT_CONFIG:
- if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
- netif_info(lio, probe, lio->netdev,
- "%s VxLAN Destination UDP PORT:%d ADDED\n",
- netdev->name,
- nctrl->ncmd.s.param1);
- } else if (nctrl->ncmd.s.more ==
- OCTNET_CMD_VXLAN_PORT_DEL) {
- netif_info(lio, probe, lio->netdev,
- "%s VxLAN Destination UDP PORT:%d DELETED\n",
- netdev->name,
- nctrl->ncmd.s.param1);
- }
- break;
-
- case OCTNET_CMD_SET_FLOW_CTL:
- netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
- break;
-
- default:
- dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
- nctrl->ncmd.s.cmd);
- }
-}
-
/**
* \brief Converts a mask based on net device flags
* @param netdev network device
@@ -2817,8 +3024,7 @@ static void handle_timestamp(struct octeon_device *oct,
*/
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
- struct octnet_buf_free_info *finfo,
- int xmit_more)
+ struct octnet_buf_free_info *finfo)
{
int retval;
struct octeon_soft_command *sc;
@@ -2846,9 +3052,15 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
- len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
+ if (OCTEON_CN23XX_PF(oct))
+ len = (u32)((struct octeon_instr_ih3 *)
+ (&sc->cmd.cmd3.ih3))->dlengsz;
+ else
+ len = (u32)((struct octeon_instr_ih2 *)
+ (&sc->cmd.cmd2.ih2))->dlengsz;
+
+ ring_doorbell = 1;
- ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype);
@@ -2881,7 +3093,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
union tx_info *tx_info;
int status = 0;
int q_idx = 0, iq_no = 0;
- int xmit_more, j;
+ int j;
u64 dptr = 0;
u32 tag = 0;
@@ -2980,7 +3192,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- ndata.cmd.cmd2.dptr = dptr;
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET;
@@ -3055,15 +3270,23 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
g->sg_size, DMA_TO_DEVICE);
dptr = g->sg_dma_ptr;
- ndata.cmd.cmd2.dptr = dptr;
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr;
finfo->g = g;
ndata.reqtype = REQTYPE_NORESP_NET_SG;
}
- irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
- tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ if (OCTEON_CN23XX_PF(oct)) {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+ } else {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ }
if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
@@ -3077,12 +3300,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
- xmit_more = skb->xmit_more;
-
if (unlikely(cmdsetup.s.timestamp))
- status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo);
else
- status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ status = octnet_send_nic_data_pkt(oct, &ndata);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
@@ -3190,8 +3411,8 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
* OCTNET_CMD_RXCSUM_DISABLE
* @returns SUCCESS or FAILURE
*/
-int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
- u8 rx_cmd)
+static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -3249,31 +3470,6 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
return ret;
}
-int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
-{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct octnic_ctrl_pkt nctrl;
- int ret = 0;
-
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
-
- nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = param1;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
- nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
- ret);
- }
- return ret;
-}
-
/** \brief Net device fix features
* @param netdev pointer to network device
* @param request features requested
@@ -3492,8 +3688,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
- u32 resp_size, ctx_size;
+ u32 resp_size, ctx_size, data_size;
u32 ifidx_or_pfnum;
+ struct lio_version *vdata;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3515,21 +3712,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (i = 0; i < octeon_dev->ifcount; i++) {
resp_size = sizeof(struct liquidio_if_cfg_resp);
ctx_size = sizeof(struct liquidio_if_cfg_context);
+ data_size = sizeof(struct lio_version);
sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(octeon_dev, 0,
+ octeon_alloc_soft_command(octeon_dev, data_size,
resp_size, ctx_size);
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ *((u64 *)vdata) = 0;
+ vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
- num_iqueues =
- CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
- num_oqueues =
- CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
- base_queue =
- CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
- gmx_port_id =
- CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
- ifidx_or_pfnum = i;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ num_iqueues = octeon_dev->sriov_info.num_pf_rings;
+ num_oqueues = octeon_dev->sriov_info.num_pf_rings;
+ base_queue = octeon_dev->sriov_info.pf_srn;
+
+ gmx_port_id = octeon_dev->pf_num;
+ ifidx_or_pfnum = octeon_dev->pf_num;
+ } else {
+ num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ base_queue = CFG_GET_BASE_QUE_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ gmx_port_id = CFG_GET_GMXID_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ ifidx_or_pfnum = i;
+ }
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
@@ -3566,7 +3779,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed out.
*/
- sleep_cond(&ctx->wc, &ctx->cond);
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
+ goto setup_nic_wait_intr;
+ }
+
retval = resp->status;
if (retval) {
dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
@@ -3633,12 +3850,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- lio->dev_capability = NETIF_F_HIGHDMA
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_SG | NETIF_F_RXCSUM
- | NETIF_F_GRO
- | NETIF_F_TSO | NETIF_F_TSO6
- | NETIF_F_LRO;
+ if (OCTEON_CN23XX_PF(octeon_dev) ||
+ OCTEON_CN6XXX(octeon_dev)) {
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ }
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
@@ -3713,7 +3934,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
- octeon_dev->priv_flags = 0x0;
+ if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
+ octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+ else
+ octeon_dev->priv_flags = 0x0;
if (netdev->features & NETIF_F_LRO)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
@@ -3725,6 +3949,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
liquidio_set_feature(netdev,
OCTNET_CMD_VERBOSE_ENABLE, 0);
+ if (setup_link_status_change_wq(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -3760,6 +3987,8 @@ setup_nic_dev_fail:
octeon_free_soft_command(octeon_dev, sc);
+setup_nic_wait_intr:
+
while (i--) {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
@@ -3789,8 +4018,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* run port_config command for each port */
oct->ifcount = num_nic_ports;
- memset(oct->props, 0,
- sizeof(struct octdev_props) * num_nic_ports);
+ memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
for (i = 0; i < MAX_OCTEON_LINKS; i++)
oct->props[i].gmxport = -1;
@@ -3806,7 +4034,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
@@ -3818,6 +4046,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -3880,6 +4109,7 @@ static void nic_starter(struct work_struct *work)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
+ int fw_loaded = 0;
char bootcmd[] = "\n";
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
@@ -3901,9 +4131,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (!cn23xx_fw_loaded(octeon_dev)) {
+ fw_loaded = 0;
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+ /* things might have changed */
+ if (!cn23xx_fw_loaded(octeon_dev))
+ fw_loaded = 0;
+ else
+ fw_loaded = 1;
+ } else {
+ fw_loaded = 1;
+ }
+ } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
return 1;
+ }
/* Initialize the dispatch mechanism used to push packets arriving on
* Octeon Output queues.
@@ -3925,6 +4169,22 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_set_io_queues_off(octeon_dev);
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
+ return ret;
+ }
+ }
+
+ /* Initialize soft command buffer pool
+ */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
/* Setup the data structures that manage this Octeon's Input queues. */
if (octeon_setup_instr_queues(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3936,14 +4196,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
}
atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
- /* Initialize soft command buffer pool
- */
- if (octeon_setup_sc_buffer_pool(octeon_dev)) {
- dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
- return 1;
- }
- atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
-
/* Initialize lists to manage the requests of different types that
* arrive from user & kernel applications for this octeon device.
*/
@@ -3963,15 +4215,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
- /* The input and output queue registers were setup earlier (the queues
- * were not enabled). Any additional registers that need to be
- * programmed should be done now.
- */
- ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev,
- "Failed to configure device registers\n");
- return ret;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (octeon_allocate_ioq_vector(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return 1;
+ }
+
+ } else {
+ /* The input and output queue registers were setup earlier (the
+ * queues were not enabled). Any additional registers
+ * that need to be programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
}
/* Initialize the tasklet that handles output queue packet processing.*/
@@ -3985,63 +4245,76 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
return 1;
/* Enable Octeon device interrupts */
- octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
/* Enable the input and output queues for this Octeon device */
- octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
+ return ret;
+ }
atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
- dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
-
- if (ddr_timeout == 0)
- dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+ if (!ddr_timeout) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
- schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
- /* Wait for the octeon to initialize DDR after the soft-reset. */
- while (ddr_timeout == 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(HZ / 10)) {
- /* user probably pressed Control-C */
+ /* Wait for the octeon to initialize DDR after the soft-reset.*/
+ while (!ddr_timeout) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
return 1;
}
- }
- ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev,
- "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
- ret);
- return 1;
- }
- if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
- dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
- return 1;
- }
+ if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
- /* Divert uboot to take commands from host instead. */
- ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+ /* Divert uboot to take commands from host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
- dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
- ret = octeon_init_consoles(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
- return 1;
- }
- ret = octeon_add_console(octeon_dev, 0);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
- return 1;
- }
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+ ret = octeon_add_console(octeon_dev, 0);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ }
- atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
- dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
- ret = load_firmware(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
- return 1;
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+ /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
+ * loaded
+ */
+ if (OCTEON_CN23XX_PF(octeon_dev))
+ octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
+ 2ULL);
}
handshake[octeon_dev->octeon_id].init_ok = 1;
@@ -4057,7 +4330,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->droq[j]->pkts_credit_reg);
/* Packets can start arriving on the output queues from this point. */
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 199a8b9c7dc5..0d990accb65e 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,10 +30,24 @@
#include "octeon_config.h"
-#define LIQUIDIO_BASE_VERSION "1.4"
-#define LIQUIDIO_MICRO_VERSION ".1"
#define LIQUIDIO_PACKAGE ""
-#define LIQUIDIO_VERSION "1.4.1"
+#define LIQUIDIO_BASE_MAJOR_VERSION 1
+#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MICRO_VERSION 1
+#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION)
+#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+#define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
+ __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION) \
+ "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+
+struct lio_version {
+ u16 major;
+ u16 minor;
+ u16 micro;
+ u16 reserved;
+};
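
The new version macros depend on two-level macro expansion: __stringify() expands its argument before stringizing, so the three numeric defines concatenate into "1.4.1" at compile time. A standalone demonstration with local STR/XSTR helpers standing in for __stringify():

#include <stdio.h>

#define XSTR(x) #x
#define STR(x) XSTR(x)	/* expand x first, then stringize */

#define MAJOR 1
#define MINOR 4
#define MICRO 1
#define VERSION STR(MAJOR) "." STR(MINOR) "." STR(MICRO)

int main(void)
{
	printf("%s\n", VERSION);	/* prints 1.4.1 */
	return 0;
}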
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
@@ -218,6 +232,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+
+#define OCTNET_CMD_ID_ACTIVE 0x1a
+
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
@@ -296,6 +313,13 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+#define LIO_SOFTCMDRESP_IH2 40
+#define LIO_SOFTCMDRESP_IH3 (40 + 8)
+
+#define LIO_PCICMD_O2 24
+#define LIO_PCICMD_O3 (24 + 8)
+
/* Instruction Header(DPI) - for OCTEON-III models */
struct octeon_instr_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -814,6 +838,8 @@ struct oct_link_stats {
#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
#define VITESSE_PHY_GPIO_HIGH 0x2
#define VITESSE_PHY_GPIO_LOW 0x3
+#define LED_IDENTIFICATION_ON 0x1
+#define LED_IDENTIFICATION_OFF 0x0
struct oct_mdio_cmd {
u64 op;
@@ -832,7 +858,7 @@ struct oct_mdio_cmd {
/* intrmod: max. packets to trigger interrupt */
#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
+#define LIO_INTRMOD_RXMINCNT_TRIGGER 0
/* intrmod: max. time to trigger interrupt */
#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
/* 66xx:intrmod: min. time to trigger interrupt
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3396e3a8bab..c76556809ed1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -64,6 +64,34 @@
#define DEFAULT_NUM_NIC_PORTS_68XX 4
#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32
+#define CN23XX_MAX_RINGS_PER_PF 64
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_DB_MIN 1
+#define CN23XX_DB_MAX 8
+#define CN23XX_DB_TIMEOUT 1
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_OQ_BUF_SIZE 1536
+#define CN23XX_OQ_PKTSPER_INTR 128
+/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
+#define CN23XX_OQ_REFIL_THRESHOLD 128
+
+#define CN23XX_OQ_INTR_PKT 64
+#define CN23XX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_23XX 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+/* PEMs count */
+#define CN23XX_MAX_MACS 4
+
+#define CN23XX_DEF_IQ_INTR_THRESHOLD 32
+#define CN23XX_DEF_IQ_INTR_BYTE_THRESHOLD (64 * 1024)
/* common OCTEON configuration macros */
#define CN6XXX_CFG_IO_QUEUES 32
#define OCTEON_32BYTE_INSTR 32
@@ -92,6 +120,9 @@
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+#define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
+#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
+
#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
@@ -140,19 +171,24 @@
enum lio_card_type {
LIO_210SV = 0, /* Two port, 66xx */
LIO_210NV, /* Two port, 68xx */
- LIO_410NV /* Four port, 68xx */
+ LIO_410NV, /* Four port, 68xx */
+ LIO_23XX /* 23xx */
};
#define LIO_210SV_NAME "210sv"
#define LIO_210NV_NAME "210nv"
#define LIO_410NV_NAME "410nv"
+#define LIO_23XX_NAME "23xx"
/** Structure to define the configuration attributes for each Input queue.
* Applicable to all Octeon processors
**/
struct octeon_iq_config {
#ifdef __BIG_ENDIAN_BITFIELD
- u64 reserved:32;
+ u64 reserved:16;
+
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
/** Minimum ticks to wait before checking for pending instructions. */
u64 db_timeout:16;
@@ -192,7 +228,10 @@ struct octeon_iq_config {
/** Minimum ticks to wait before checking for pending instructions. */
u64 db_timeout:16;
- u64 reserved:32;
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
+
+ u64 reserved:16;
#endif
};
@@ -416,11 +455,15 @@ struct octeon_config {
#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
-/* Maximum number of Octeon Output queues */
-#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_INPUT_QUEUES : \
+ CN6XXX_MAX_INPUT_QUEUES)
-#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
-#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_OUTPUT_QUEUES : \
+ CN6XXX_MAX_OUTPUT_QUEUES)
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index bbb50ea66f16..01a50f3b0c8e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -25,12 +25,13 @@
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/crc32.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_main.h"
+#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
@@ -40,6 +41,10 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
u32 flags);
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size);
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
@@ -177,6 +182,15 @@ struct octeon_pci_console_desc {
__cvmx_bootmem_desc_get(oct, addr, \
offsetof(struct cvmx_bootmem_named_block_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+static int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
/**
* This function is the implementation of the get macros defined
@@ -709,3 +723,104 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
return bytes_to_read;
}
+
+#define FBUF_SIZE (4 * 1024 * 1024)
+u8 fbuf[FBUF_SIZE];
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ int ret = 0;
+ u8 *p = fbuf;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ struct octeon_firmware_file_header *h;
+ u32 i, rem;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+ LIQUIDIO_PACKAGE, h->version);
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+ strlen(LIQUIDIO_BASE_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION,
+ h->version + strlen(LIQUIDIO_PACKAGE));
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ data += sizeof(struct octeon_firmware_file_header);
+
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+ image_len, load_addr);
+
+ /* Write in 4MB chunks */
+ rem = image_len;
+
+ while (rem) {
+ if (rem < FBUF_SIZE)
+ size = rem;
+ else
+ size = FBUF_SIZE;
+
+ memcpy(p, data, size);
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
+ }
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+ return 0;
+}
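
The moved octeon_download_firmware() validates the header CRC with the kernel idiom crc32(~0, buf, len) ^ ~0U, which is the standard reflected CRC-32; zlib's crc32() seeded with 0 should compute the same value, since it applies the pre/post inversion internally. A userspace sketch of the same check (compile with -lz; fw_header is a simplified stand-in for octeon_firmware_file_header and endianness handling is omitted):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h>	/* link with -lz */

/* Simplified stand-in for the real firmware header layout. */
struct fw_header {
	uint32_t magic;
	char version[16];
	uint32_t crc32;	/* CRC over everything before this field */
};

static int check_header(const struct fw_header *h)
{
	uint32_t computed;

	/* zlib's crc32(0, ...) matches kernel crc32(~0, ...) ^ ~0U */
	computed = crc32(0L, (const unsigned char *)h,
			 sizeof(*h) - sizeof(uint32_t));
	return computed == h->crc32 ? 0 : -1;
}

int main(void)
{
	struct fw_header h;

	memset(&h, 0, sizeof(h));	/* make padding bytes deterministic */
	h.magic = 0x1234;
	strcpy(h.version, "1.4.1");
	h.crc32 = crc32(0L, (const unsigned char *)&h,
			sizeof(h) - sizeof(uint32_t));

	printf("crc check: %s\n", check_header(&h) ? "FAIL" : "ok");
	return 0;
}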
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 0eb504a4379a..586b68899b06 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -20,7 +20,6 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/pci.h>
-#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
@@ -32,8 +31,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
+#include "cn23xx_pf_device.h"
/** Default configuration
* for CN66XX OCTEON Models.
@@ -420,6 +418,108 @@ static struct octeon_config default_cn68xx_210nv_conf = {
,
};
+static struct octeon_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
+ CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN23XX_DB_MIN,
+ .db_timeout = CN23XX_DB_TIMEOUT,
+ .iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN23XX_OQ_INTR_PKT,
+ .oq_intr_time = CN23XX_OQ_INTR_TIME,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
+ .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for jumbo frames;
+ * Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for jumbo frames;
+ * Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+};
+
enum {
OCTEON_CONFIG_TYPE_DEFAULT = 0,
NUM_OCTEON_CONFS,
@@ -487,6 +587,8 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct,
} else if ((oct->chip_id == OCTEON_CN68XX) &&
(card_type == LIO_410NV)) {
ret = (void *)&default_cn68xx_conf;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ ret = (void *)&default_cn23xx_conf;
}
break;
default:
@@ -501,7 +603,8 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
case OCTEON_CN66XX:
case OCTEON_CN68XX:
return lio_validate_cn6xxx_config_info(oct, conf);
-
+ case OCTEON_CN23XX_PF_VID:
+ return 0;
default:
break;
}
@@ -541,107 +644,6 @@ static char *get_oct_app_string(u32 app_mode)
return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
-u8 fbuf[4 * 1024 * 1024];
-
-int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
- size_t size)
-{
- int ret = 0;
- u8 *p = fbuf;
- u32 crc32_result;
- u64 load_addr;
- u32 image_len;
- struct octeon_firmware_file_header *h;
- u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
- char *base;
-
- if (size < sizeof(struct octeon_firmware_file_header)) {
- dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
- (u32)size,
- (u32)sizeof(struct octeon_firmware_file_header));
- return -EINVAL;
- }
-
- h = (struct octeon_firmware_file_header *)data;
-
- if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
- dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
- return -EINVAL;
- }
-
- crc32_result = crc32((unsigned int)~0, data,
- sizeof(struct octeon_firmware_file_header) -
- sizeof(u32)) ^ ~0U;
- if (crc32_result != be32_to_cpu(h->crc32)) {
- dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
- crc32_result, be32_to_cpu(h->crc32));
- return -EINVAL;
- }
-
- if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
- LIQUIDIO_PACKAGE, h->version);
- return -EINVAL;
- }
-
- base = h->version + strlen(LIQUIDIO_PACKAGE);
- ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
- if (ret) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
- LIQUIDIO_BASE_VERSION, base);
- return -EINVAL;
- }
-
- if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
- dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
- be32_to_cpu(h->num_images));
- return -EINVAL;
- }
-
- dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
- h->version);
-
- data += sizeof(struct octeon_firmware_file_header);
-
- dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
- be32_to_cpu(h->num_images));
- /* load all images */
- for (i = 0; i < be32_to_cpu(h->num_images); i++) {
- load_addr = be64_to_cpu(h->desc[i].addr);
- image_len = be32_to_cpu(h->desc[i].len);
-
- dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
- image_len, load_addr);
-
- /* Write in 4MB chunks*/
- rem = image_len;
-
- while (rem) {
- if (rem < (4 * 1024 * 1024))
- size = rem;
- else
- size = 4 * 1024 * 1024;
-
- memcpy(p, data, size);
-
- /* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
-
- data += size;
- rem -= (u32)size;
- load_addr += size;
- }
- }
- dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
- h->bootcmd);
-
- /* Invoke the bootcmd */
- ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-
- return 0;
-}
-
void octeon_free_device_mem(struct octeon_device *oct)
{
int i;
@@ -676,6 +678,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
configsize = sizeof(struct octeon_cn6xxx);
break;
+ case OCTEON_CN23XX_PF_VID:
+ configsize = sizeof(struct octeon_cn23xx_pf);
+ break;
default:
pr_err("%s: Unknown PCI Device: 0x%x\n",
__func__,
@@ -741,6 +746,45 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
return oct;
}
+int
+octeon_allocate_ioq_vector(struct octeon_device *oct)
+{
+ int i, num_ioqs = 0;
+ struct octeon_ioq_vector *ioq_vector;
+ int cpu_num;
+ int size;
+
+ if (OCTEON_CN23XX_PF(oct))
+ num_ioqs = oct->sriov_info.num_pf_rings;
+ size = sizeof(struct octeon_ioq_vector) * num_ioqs;
+
+ oct->ioq_vector = vmalloc(size);
+ if (!oct->ioq_vector)
+ return 1;
+ memset(oct->ioq_vector, 0, size);
+ for (i = 0; i < num_ioqs; i++) {
+ ioq_vector = &oct->ioq_vector[i];
+ ioq_vector->oct_dev = oct;
+ ioq_vector->iq_index = i;
+ ioq_vector->droq_index = i;
+
+ cpu_num = i % num_online_cpus();
+ cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
+
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID)
+ ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
+ else
+ ioq_vector->ioq_num = i;
+ }
+ return 0;
+}
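
octeon_allocate_ioq_vector() spreads the ioq vectors across CPUs with a plain modulo, vector i landing on CPU i % num_online_cpus(), which the MSI-X setup later turns into an affinity hint. The same round-robin assignment in plain C, with sysconf() standing in for num_online_cpus():

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	int num_ioqs = 8;	/* e.g. sriov_info.num_pf_rings */

	if (ncpus < 1)
		ncpus = 1;
	for (int i = 0; i < num_ioqs; i++)
		printf("ioq %d -> cpu %ld\n", i, i % ncpus);
	return 0;
}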
+
+void
+octeon_free_ioq_vector(struct octeon_device *oct)
+{
+ vfree(oct->ioq_vector);
+}
+
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
@@ -749,10 +793,12 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
union oct_txpciq txpciq;
int numa_node = cpu_to_node(iq_no % num_online_cpus());
- /* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct))
num_descs =
CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
oct->num_iqs = 0;
@@ -769,6 +815,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
oct->instr_queue[0]->ifidx = 0;
txpciq.u64 = 0;
txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = oct->pfvf_hsword.pkind;
txpciq.s.use_qpg = 0;
txpciq.s.qpg = 0;
if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
@@ -788,14 +835,17 @@ int octeon_setup_output_queues(struct octeon_device *oct)
u32 oq_no = 0;
int numa_node = cpu_to_node(oq_no % num_online_cpus());
- /* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
num_descs =
CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
desc_size =
CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
}
-
oct->num_oqs = 0;
oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
if (!oct->droq[0])
@@ -812,10 +862,10 @@ int octeon_setup_output_queues(struct octeon_device *oct)
void octeon_set_io_queues_off(struct octeon_device *oct)
{
- /* Disable the i/p and o/p queues for this Octeon. */
-
- octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
- octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ if (OCTEON_CN6XXX(oct)) {
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
}
void octeon_set_droq_pkt_op(struct octeon_device *oct,
@@ -825,14 +875,16 @@ void octeon_set_droq_pkt_op(struct octeon_device *oct,
u32 reg_val = 0;
/* Disable the i/p and o/p queues for this Octeon. */
- reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ if (OCTEON_CN6XXX(oct)) {
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
- if (enable)
- reg_val = reg_val | (1 << q_no);
- else
- reg_val = reg_val & (~(1 << q_no));
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
- octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ }
}
int octeon_init_dispatch_list(struct octeon_device *oct)
@@ -1019,6 +1071,9 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
if (OCTEON_CN6XXX(oct))
num_nic_ports =
CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf));
if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
@@ -1046,6 +1101,12 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
}
oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;
+
+ for (i = 0; i < oct->num_iqs; i++)
+ oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
atomic_set(&oct->status, OCT_DEV_CORE_OK);
@@ -1108,8 +1169,10 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
if (OCTEON_CN6XXX(oct)) {
default_oct_conf =
(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_FIELD(oct, cn23xx_pf, conf));
}
-
return default_oct_conf;
}
@@ -1141,7 +1204,9 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
* So write MSB first
*/
addrhi = (addr >> 32);
- if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX) ||
+ (oct->chip_id == OCTEON_CN23XX_PF_VID))
addrhi |= 0x00060000;
writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
@@ -1185,8 +1250,15 @@ int octeon_mem_access_ok(struct octeon_device *oct)
u64 lmc0_reset_ctl;
/* Check to make sure a DDR interface is enabled */
- lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
- access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ if (OCTEON_CN23XX_PF(oct)) {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
+ } else {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ }
return access_okay ? 0 : 1;
}
@@ -1226,3 +1298,39 @@ int lio_get_device_id(void *dev)
return octeon_dev->octeon_id;
return -1;
}
+
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+ u64 instr_cnt;
+ struct octeon_device *oct = NULL;
+
+ /* the whole thing needs to be atomic, ideally */
+ if (droq) {
+ spin_lock_bh(&droq->lock);
+ writel(droq->pkt_count, droq->pkts_sent_reg);
+ droq->pkt_count = 0;
+ spin_unlock_bh(&droq->lock);
+ oct = droq->oct_dev;
+ }
+ if (iq) {
+ spin_lock_bh(&iq->lock);
+ writel(iq->pkt_in_done, iq->inst_cnt_reg);
+ iq->pkt_in_done = 0;
+ spin_unlock_bh(&iq->lock);
+ oct = iq->oct_dev;
+ }
+	/* Write RESEND. Writing RESEND in SLI_PKTX_CNTS should be enough
+	 * to trigger tx interrupts as well, if they are pending.
+	 */
+ if (oct && OCTEON_CN23XX_PF(oct)) {
+ if (droq)
+ writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
+		/* We race with firmware here; read and write IN_DONE_CNTS */
+ else if (iq) {
+ instr_cnt = readq(iq->inst_cnt_reg);
+ writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
+ CN23XX_INTR_RESEND),
+ iq->inst_cnt_reg);
+ }
+ }
+}
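
A minimal standalone sketch of the read-modify-write that lio_enable_irq() does on IN_DONE_CNTS: bits 63:32 (which the firmware may update concurrently) are preserved while a RESEND is requested. The bit position of the RESEND flag below is an assumption for illustration, not the real CN23XX register layout.

#include <stdint.h>
#include <assert.h>

#define INTR_RESEND_BIT	(1ULL << 63)	/* assumed bit position */

static uint64_t resend_word(uint64_t inst_cnt)
{
	/* preserve bits 63:32 of the counter word, OR in the RESEND request */
	return (inst_cnt & 0xFFFFFFFF00000000ULL) | INTR_RESEND_BIT;
}

int main(void)
{
	assert(resend_word(0x1234567800000042ULL) ==
	       (0x1234567800000000ULL | INTR_RESEND_BIT));
	return 0;
}
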
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 01edfb404346..da15c2ae9330 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -30,13 +30,19 @@
/** PCI VendorId Device Id */
#define OCTEON_CN68XX_PCIID 0x91177d
#define OCTEON_CN66XX_PCIID 0x92177d
-
+#define OCTEON_CN23XX_PCIID_PF 0x9702177d
/** Driver identifies chips by these Ids, created by clubbing together
* DeviceId+RevisionId; Where Revision Id is not used to distinguish
* between chips, a value of 0 is used for revision id.
*/
#define OCTEON_CN68XX 0x0091
#define OCTEON_CN66XX 0x0092
+#define OCTEON_CN23XX_PF_VID 0x9702
+
+/** RevisionId for the chips */
+#define OCTEON_CN23XX_REV_1_0 0x00
+#define OCTEON_CN23XX_REV_1_1 0x01
+#define OCTEON_CN23XX_REV_2_0 0x80
/** Endian-swap modes supported by Octeon. */
enum octeon_pci_swap_mode {
@@ -46,6 +52,9 @@ enum octeon_pci_swap_mode {
OCTEON_PCI_32BIT_LW_SWAP = 3
};
+#define OCTEON_OUTPUT_INTR (2)
+#define OCTEON_ALL_INTR 0xff
+
/*--------------- PCI BAR1 index registers -------------*/
/* BAR1 Mask */
@@ -198,9 +207,9 @@ struct octeon_fn_list {
void (*setup_oq_regs)(struct octeon_device *, u32);
irqreturn_t (*process_interrupt_regs)(void *);
+ u64 (*msix_interrupt_handler)(void *);
int (*soft_reset)(struct octeon_device *);
int (*setup_device_regs)(struct octeon_device *);
- void (*reinit_regs)(struct octeon_device *);
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
@@ -209,10 +218,10 @@ struct octeon_fn_list {
void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
- void (*enable_interrupt)(void *);
- void (*disable_interrupt)(void *);
+ void (*enable_interrupt)(struct octeon_device *, u8);
+ void (*disable_interrupt)(struct octeon_device *, u8);
- void (*enable_io_queues)(struct octeon_device *);
+ int (*enable_io_queues)(struct octeon_device *);
void (*disable_io_queues)(struct octeon_device *);
};
@@ -266,11 +275,72 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
+ int rx_on;
int napi_enabled;
int gmxport;
struct net_device *netdev;
};
+#define LIO_FLAG_MSIX_ENABLED 0x1
+#define MSIX_PO_INT 0x1
+#define MSIX_PI_INT 0x2
+
+struct octeon_pf_vf_hs_word {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+	/** app that is currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+#else
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+	/** app that is currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+#endif
+};
+
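
The mirrored little-/big-endian bitfield layout above is the usual way to keep a 64-bit handshake word's layout stable across endianness. As a sketch, the same word can be packed with explicit shifts (field widths taken from the struct; an illustration, not driver code):

#include <stdint.h>

static uint64_t hs_word_pack(uint8_t pkind, uint16_t core_tics,
			     uint16_t coproc_tics, uint8_t app_mode)
{
	return (uint64_t)pkind |		/* bits  7:0  */
	       ((uint64_t)core_tics << 8) |	/* bits 23:8  */
	       ((uint64_t)coproc_tics << 24) |	/* bits 39:24 */
	       ((uint64_t)app_mode << 40);	/* bits 47:40; 63:48 reserved */
}

static uint8_t hs_word_pkind(uint64_t w)
{
	return (uint8_t)(w & 0xFF);
}
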
+struct octeon_sriov_info {
+ /* Actual rings left for PF device */
+ u32 num_pf_rings;
+
+	/* Starting ring number (SRN) of PF-usable IO queues */
+	u32 pf_srn;
+
+	/* Total PF rings */
+	u32 trs;
+};
+
+struct octeon_ioq_vector {
+ struct octeon_device *oct_dev;
+ int iq_index;
+ int droq_index;
+ int vector;
+ struct cpumask affinity_mask;
+ u32 ioq_num;
+};
+
/** The Octeon device.
* Each Octeon device has this structure to represent all its
* components.
@@ -296,7 +366,7 @@ struct octeon_device {
/** Octeon Chip type. */
u16 chip_id;
u16 rev_id;
-
+ u16 pf_num;
/** This device's id - set by the driver. */
u32 octeon_id;
@@ -305,7 +375,6 @@ struct octeon_device {
u16 flags;
#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2)
/** The state of this device */
atomic_t status;
@@ -395,6 +464,19 @@ struct octeon_device {
void *priv;
+ int num_msix_irqs;
+
+ void *msix_entries;
+
+ struct octeon_sriov_info sriov_info;
+
+ struct octeon_pf_vf_hs_word pfvf_hsword;
+
+ int msix_on;
+
+	/** IOq information for its corresponding MSI-X interrupt. */
+ struct octeon_ioq_vector *ioq_vector;
+
int rx_pause;
int tx_pause;
@@ -402,12 +484,15 @@ struct octeon_device {
/* private flags to control driver-specific features through ethtool */
u32 priv_flags;
+
+ void *watchdog_task;
};
#define OCT_DRV_ONLINE 1
#define OCT_DRV_OFFLINE 2
#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
(oct->chip_id == OCTEON_CN68XX))
+#define OCTEON_CN23XX_PF(oct) (oct->chip_id == OCTEON_CN23XX_PF_VID)
#define CHIP_FIELD(oct, TYPE, field) \
(((struct octeon_ ## TYPE *)(oct->chip))->field)
@@ -661,13 +746,24 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+void octeon_free_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct);
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
+
 /* LiquidIO driver private flags */
enum {
OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
};
-static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
- u32 val)
+#define OCT_PRIV_FLAG_DEFAULT 0x0
+
+static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
+{
+ return !!(octdev->priv_flags & (0x1 << flag));
+}
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev,
+ u32 flag, u32 val)
{
if (val)
octdev->priv_flags |= (0x1 << flag);
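
A brief usage sketch for the new accessor pair, with the one flag defined in the enum above (octdev is any initialized struct octeon_device pointer):

	/* coalesce Tx interrupts by pending byte count */
	lio_set_priv_flag(octdev, OCT_PRIV_FLAG_TX_BYTES, 1);

	if (lio_get_priv_flag(octdev, OCT_PRIV_FLAG_TX_BYTES))
		dev_info(&octdev->pci_dev->dev, "Tx-bytes mode enabled\n");
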
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index e0afe4c1fd01..f60e5320daf4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -31,6 +31,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -92,22 +93,25 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
-/** Check for packets on Droq. This function should be called with
- * lock held.
+/** Check for packets on Droq. This function should be called with lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
+ u32 last_count;
pkt_count = readl(droq->pkts_sent_reg);
- if (pkt_count) {
- atomic_add(pkt_count, &droq->pkts_pending);
- writel(pkt_count, droq->pkts_sent_reg);
- }
- return pkt_count;
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+	/* Counts are written back at napi irq enable or droq tasklet end */
+ if (last_count)
+ atomic_add(last_count, &droq->pkts_pending);
+
+ return last_count;
}
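
The function now tracks a running snapshot in droq->pkt_count instead of acknowledging the register on every read. Because both values are u32, the subtraction yields the correct delta even when the hardware counter wraps past 2^32; a standalone check of that property:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t prev = 0xFFFFFFF0u;	/* snapshot taken before the wrap */
	uint32_t now  = 0x00000010u;	/* register value after wrapping */

	assert((uint32_t)(now - prev) == 0x20u);	/* 32 new packets */
	return 0;
}
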
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
@@ -259,6 +263,11 @@ int octeon_init_droq(struct octeon_device *oct,
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
c_refill_threshold =
(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
} else {
return 1;
}
@@ -564,7 +573,7 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
(unsigned int)rh->r.opcode,
(unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
- } /* else (dispatch_fn ... */
+ }
return cnt;
}
@@ -735,16 +744,20 @@ octeon_droq_process_packets(struct octeon_device *oct,
u32 pkt_count = 0, pkts_processed = 0;
struct list_head *tmp, *tmp2;
+ /* Grab the droq lock */
+ spin_lock(&droq->lock);
+
+ octeon_droq_check_hw_for_pkts(droq);
pkt_count = atomic_read(&droq->pkts_pending);
- if (!pkt_count)
+
+ if (!pkt_count) {
+ spin_unlock(&droq->lock);
return 0;
+ }
if (pkt_count > budget)
pkt_count = budget;
- /* Grab the droq lock */
- spin_lock(&droq->lock);
-
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
atomic_sub(pkts_processed, &droq->pkts_pending);
@@ -789,6 +802,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
spin_lock(&droq->lock);
while (total_pkts_processed < budget) {
+ octeon_droq_check_hw_for_pkts(droq);
+
pkts_available =
CVM_MIN((budget - total_pkts_processed),
(u32)(atomic_read(&droq->pkts_pending)));
@@ -803,8 +818,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
atomic_sub(pkts_processed, &droq->pkts_pending);
total_pkts_processed += pkts_processed;
-
- octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
@@ -874,8 +887,11 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
return 0;
}
break;
+ case OCTEON_CN23XX_PF_VID: {
+ lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
+ }
+ break;
}
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 5a6fb9113bbd..5be002d5dba4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -261,6 +261,8 @@ struct octeon_droq {
u32 q_no;
+ u32 pkt_count;
+
struct octeon_droq_ops ops;
struct octeon_device *oct_dev;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index ff4b1d6f007b..e4d426ba18dc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -88,6 +88,8 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
+ u32 pkt_in_done;
+
/** A spinlock to protect access to the input ring.*/
spinlock_t iq_flush_running_lock;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index bc14e4c27332..366298f7bcb2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -38,12 +38,26 @@
#define DRV_NAME "LiquidIO"
-/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns 1 = enabled. 0 otherwise
+/** This structure is used by the NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
+ */
-int octeon_console_debug_enabled(u32 console);
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+	/** Bytes 33-40. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
@@ -167,22 +181,26 @@ cnnic_numa_alloc_aligned_dma(u32 size,
#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
free_pages(orig_ptr, get_order(size))
-static inline void
+static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
+ int errno = 0;
wait_queue_t we;
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
- if (signal_pending(current))
+ if (signal_pending(current)) {
+ errno = -EINTR;
goto out;
+ }
schedule();
}
out:
set_current_state(TASK_RUNNING);
remove_wait_queue(wait_queue, &we);
+ return errno;
}
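
With sleep_cond() now returning -EINTR when a signal arrives, callers can propagate the interruption instead of proceeding as if the condition were met. A hypothetical caller (names invented for illustration):

static int wait_for_sc_done(wait_queue_head_t *wq, int *done_flag)
{
	int err = sleep_cond(wq, done_flag);

	if (err)
		return err;	/* -EINTR: interrupted by a signal */
	return 0;
}
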
static inline void
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 95a4bbedf557..0dc081a99b30 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,7 +19,6 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index fb820dc7fcb7..e5d1debd05ad 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -26,8 +26,6 @@
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
-#include <linux/version.h>
-#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
@@ -124,11 +122,21 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
+
+ /* work queue for link status */
+ struct cavium_wq link_status_wq;
+
};
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
+#define CIU3_WDOG(c) (0x1010000020000ULL + (c << 3))
+#define CIU3_WDOG_MASK 12ULL
+#define LIO_MONITOR_WDOG_EXPIRE 1
+#define LIO_MONITOR_CORE_STUCK_MSGD 2
+#define LIO_MAX_CORES 12
+
/**
* \brief Enable or disable feature
* @param netdev pointer to network device
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 166727be928f..40ac1fe88956 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,7 +19,6 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
@@ -36,6 +35,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
u32 rdatasize)
{
struct octeon_soft_command *sc;
+ struct octeon_instr_ih3 *ih3;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -52,10 +52,19 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+		/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ }
irh->rflag = 1; /* a response is required */
@@ -64,7 +73,10 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
*sc->status_word = COMPLETION_WORD_INIT;
- sc->cmd.cmd2.rptr = sc->dmarptr;
+ if (OCTEON_CN23XX_PF(oct))
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ else
+ sc->cmd.cmd2.rptr = sc->dmarptr;
sc->wait_time = 1000;
sc->timeout = jiffies + sc->wait_time;
@@ -73,12 +85,9 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
}
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata,
- u32 xmit_more)
+ struct octnic_data_pkt *ndata)
{
- int ring_doorbell;
-
- ring_doorbell = !xmit_more;
+ int ring_doorbell = 1;
return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
ndata->buf, ndata->datasize,
@@ -183,8 +192,8 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
- dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
- __func__, nctrl->ncmd.s.cmd, retval);
+ dev_err(&oct->pci_dev->dev, "%s pf_num:%d soft command:%d send failed status: %x\n",
+ __func__, oct->pf_num, nctrl->ncmd.s.cmd, retval);
spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index b71a2bbe4bee..4b8da67b995f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -138,7 +138,7 @@ octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
/* assume that rflag is cleared so therefore front data will only have
* irh and ossp[0], ossp[1] for a total of 32 bytes
*/
- ih2->fsz = 24;
+ ih2->fsz = LIO_PCICMD_O2;
ih2->tagtype = ORDERED_TAG;
ih2->grp = DEFAULT_POW_GRP;
@@ -196,7 +196,7 @@ octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
*/
ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
/*PKI IH*/
- ih3->fsz = 24 + 8;
+ ih3->fsz = LIO_PCICMD_O3;
if (!setup->s.gather) {
ih3->dlengsz = setup->s.u.datasize;
@@ -278,7 +278,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata, u32 xmit_more);
+ struct octnic_data_pkt *ndata);
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index d32492f185ff..90866bb50033 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -30,6 +30,7 @@
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -71,7 +72,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
-
+ else if (OCTEON_CN23XX_PF(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
if (!conf) {
dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
oct->chip_id);
@@ -88,6 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
+
iq->oct_dev = oct;
set_dev_node(&oct->pci_dev->dev, numa_node);
@@ -181,6 +184,9 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
if (OCTEON_CN6XXX(oct))
desc_size =
CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));
vfree(iq->request_list);
@@ -383,7 +389,12 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (OCTEON_CN23XX_PF(oct))
+ irh = (struct octeon_instr_irh *)
+ &sc->cmd.cmd3.irh;
+ else
+ irh = (struct octeon_instr_irh *)
+ &sc->cmd.cmd2.irh;
if (irh->rflag) {
/* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
@@ -499,6 +510,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
if (!oct)
return;
+
iq = oct->instr_queue[iq_no];
if (!iq)
return;
@@ -514,6 +526,8 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
/* Flush the instruction queue */
octeon_flush_iq(oct, iq, 1, 0);
+
+ lio_enable_irq(NULL, iq);
}
/* Called by the Poll thread at regular intervals to check the instruction
@@ -580,6 +594,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
{
struct octeon_config *oct_cfg;
struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -588,36 +604,88 @@ octeon_prepare_soft_command(struct octeon_device *oct,
oct_cfg = octeon_get_conf(oct);
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- ih2->tagtype = ATOMIC_TAG;
- ih2->tag = LIO_CONTROL;
- ih2->raw = 1;
- ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
-
- if (sc->datasize) {
- ih2->dlengsz = sc->datasize;
- ih2->rs = 1;
- }
-
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- irh->opcode = opcode;
- irh->subcode = subcode;
-
- /* opcode/subcode specific parameters (ossp) */
- irh->ossp = irh_ossp;
- sc->cmd.cmd2.ossp[0] = ossp0;
- sc->cmd.cmd2.ossp[1] = ossp1;
-
- if (sc->rdatasize) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rdp->pcie_port = oct->pcie_port;
- rdp->rlen = sc->rdatasize;
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = ATOMIC_TAG;
+ pki_ih3->qpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+			/* PKI IH3 */
+			/* pki_ih3+irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ irh->rflag = 0;
+			/* PKI IH3 */
+			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
+ ih3->fsz = LIO_PCICMD_O3;
+ }
- irh->rflag = 1;
- ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
} else {
- irh->rflag = 0;
- ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ } else {
+ irh->rflag = 0;
+ /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = LIO_PCICMD_O2;
+ }
}
}
@@ -625,23 +693,39 @@ int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- if (ih2->dlengsz) {
- WARN_ON(!sc->dmadptr);
- sc->cmd.cmd2.dptr = sc->dmadptr;
- }
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- if (irh->rflag) {
- WARN_ON(!sc->dmarptr);
- WARN_ON(!sc->status_word);
- *sc->status_word = COMPLETION_WORD_INIT;
-
- sc->cmd.cmd2.rptr = sc->dmarptr;
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+ len = (u32)ih3->dlengsz;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+ }
+ len = (u32)ih2->dlengsz;
}
- len = (u32)ih2->dlengsz;
if (sc->wait_time)
sc->timeout = jiffies + sc->wait_time;
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 709049e36627..be52178d8cb6 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -91,8 +91,13 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rptr = sc->cmd.cmd2.rptr;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rptr = sc->cmd.cmd3.rptr;
+ } else {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rptr = sc->cmd.cmd2.rptr;
+ }
status = OCTEON_REQUEST_PENDING;
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
index 5c4615ccaa14..6b4d4add7353 100644
--- a/drivers/net/ethernet/cavium/thunder/Makefile
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -2,6 +2,7 @@
# Makefile for Cavium's Thunder ethernet device
#
+obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e29815d9e6f4..30426109711c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -20,6 +20,17 @@
#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4
@@ -41,40 +52,8 @@
/* Max pkinds */
#define NIC_MAX_PKIND 16
-/* Rx Channels */
-/* Receive channel configuration in TNS bypass mode
- * Below is configuration in TNS bypass mode
- * BGX0-LMAC0-CHAN0 - VNIC CHAN0
- * BGX0-LMAC1-CHAN0 - VNIC CHAN16
- * ...
- * BGX1-LMAC0-CHAN0 - VNIC CHAN128
- * ...
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174
- */
-#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */
-#define NIC_CHANS_PER_INF 128
-#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
-#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
-
-/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
-#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
-#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
-#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
-#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
-
-/* Tx scheduling */
-#define NIC_MAX_TL4 1024
-#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
-#define NIC_MAX_TL3 256
-#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
-#define NIC_MAX_TL2 64
-#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
-#define NIC_MAX_TL1 2
-
-/* TNS bypass mode */
-#define NIC_TL2_PER_BGX 32
-#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
-#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC 64
/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
@@ -148,7 +127,6 @@ struct nicvf_cq_poll {
struct napi_struct napi;
};
-#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
@@ -273,6 +251,7 @@ struct nicvf {
struct net_device *netdev;
struct pci_dev *pdev;
void __iomem *reg_base;
+#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
struct nicvf_cq_poll *napi[8];
u8 vf_id;
@@ -326,7 +305,7 @@ struct nicvf {
bool msix_enabled;
u8 num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
- char irq_name[NIC_VF_MSIX_VECTORS][20];
+ char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
@@ -369,6 +348,7 @@ struct nicvf {
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -485,6 +465,31 @@ struct set_loopback {
bool enable;
};
+/* Reset statistics counters */
+struct reset_stat_cfg {
+ u8 msg;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+ u16 rx_stat_mask;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+ u8 tx_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+ */
+ u16 rq_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+ */
+ u16 sq_stat_mask;
+};
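
As a sketch, a VF that wants all of its RX and TX counters plus both stat registers of RQ0 and SQ0 cleared could fill the request like this (mask values derived from the bitmap comments above):

	struct reset_stat_cfg cfg = {
		.msg	      = NIC_MBOX_MSG_RESET_STAT_COUNTER,
		.rx_stat_mask = 0x3FFF,	/* RX_STAT 0..13 -> 14 bits */
		.tx_stat_mask = 0x1F,	/* TX_STAT 0..4  ->  5 bits */
		.rq_stat_mask = 0x0003,	/* RQ0_STAT0 and RQ0_STAT1 */
		.sq_stat_mask = 0x0003,	/* SQ0_STAT0 and SQ0_STAT1 */
	};
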
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -502,6 +507,7 @@ union nic_mbx {
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
struct set_loopback lbk;
+ struct reset_stat_cfg reset_stat;
};
#define NIC_NODE_ID_MASK 0x03
@@ -515,7 +521,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
static inline bool pass1_silicon(struct pci_dev *pdev)
{
- return pdev->revision < 8;
+ return (pdev->revision < 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+static inline bool pass2_silicon(struct pci_dev *pdev)
+{
+ return (pdev->revision >= 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 85cc782b9060..2bbf4cbf08b2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -20,8 +20,25 @@
#define DRV_NAME "thunder-nic"
#define DRV_VERSION "1.0"
+struct hw_info {
+ u8 bgx_cnt;
+ u8 chans_per_lmac;
+ u8 chans_per_bgx; /* Rx/Tx chans */
+ u8 chans_per_rgx;
+ u8 chans_per_lbk;
+ u16 cpi_cnt;
+ u16 rssi_cnt;
+ u16 rss_ind_tbl_size;
+ u16 tl4_cnt;
+ u16 tl3_cnt;
+ u8 tl2_cnt;
+ u8 tl1_cnt;
+ bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
+};
+
struct nicpf {
struct pci_dev *pdev;
+ struct hw_info *hw;
u8 node;
unsigned int flags;
u8 num_vf_en; /* No of VF enabled */
@@ -36,22 +53,22 @@ struct nicpf {
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
- u8 vf_lmac_map[MAX_LMAC];
+ u8 *vf_lmac_map;
struct delayed_work dwork;
struct workqueue_struct *check_link;
- u8 link[MAX_LMAC];
- u8 duplex[MAX_LMAC];
- u32 speed[MAX_LMAC];
+ u8 *link;
+ u8 *duplex;
+ u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- u16 rss_ind_tbl_size;
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
bool msix_enabled;
u8 num_vec;
- struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ struct msix_entry *msix_entries;
bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ char irq_name[NIC_PF_MSIX_VECTORS][20];
};
/* Supported devices */
@@ -89,9 +106,22 @@ static u64 nic_reg_read(struct nicpf *nic, u64 offset)
/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
- /* Enable mailbox interrupt for all 128 VFs */
- nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
- nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+ int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);
+
+#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
+
+ /* Clear it, to avoid spurious interrupts (if any) */
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));
+
+ /* Enable mailbox interrupt for all VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
+ /* One mailbox intr enable reg per 64 VFs */
+ if (vf_cnt > 64) {
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ }
}
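
INTR_MASK() sets the low 'vfs' bits of the mask, falling back to all-ones once vfs reaches 64, where BIT_ULL(64) would shift out of range. A standalone check of the values it produces:

#include <assert.h>

#define BIT_ULL(n)	(1ULL << (n))
#define INTR_MASK(vfs)	((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))

int main(void)
{
	assert(INTR_MASK(8) == 0xFFull);	/* 8 VFs -> low 8 bits */
	assert(INTR_MASK(64) == ~0ull);		/* one full 64-VF register */
	assert(INTR_MASK(70 - 64) == 0x3Full);	/* second register: VFs 64..69 */
	return 0;
}
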
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
@@ -144,7 +174,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
- if (vf < MAX_LMAC) {
+ if (vf < nic->num_vf_en) {
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
@@ -155,7 +185,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
mbx.nic_cfg.node_id = nic->node;
- mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+ mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -248,7 +278,8 @@ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
- int lmac;
+ int lmac, max_lmac;
+ u16 sdevid;
u64 lmac_cfg;
 	/* There is an issue in HW wherein while sending GSO sized
@@ -260,7 +291,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
if (size > 52)
size = 52;
- for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ /* 81xx's RGX has only one LMAC */
+ if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
+ max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+ else
+ max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ for (lmac = 0; lmac < max_lmac; lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
lmac_cfg &= ~(0xF << 2);
lmac_cfg |= ((size / 4) << 2);
@@ -280,7 +318,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic->num_vf_en = 0;
- for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
if (!(bgx_map & (1 << bgx)))
continue;
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
@@ -300,28 +338,125 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic_reg_write(nic,
NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
lmac_credit);
+
+	/* On CN81XX there are only 8 VFs, but the max possible number
+	 * of interfaces is 9.
+	 */
+ if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
+ nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
+ break;
+ }
}
}
+static void nic_free_lmacmem(struct nicpf *nic)
+{
+ kfree(nic->vf_lmac_map);
+ kfree(nic->link);
+ kfree(nic->duplex);
+ kfree(nic->speed);
+}
+
+static int nic_get_hw_info(struct nicpf *nic)
+{
+ u8 max_lmac;
+ u16 sdevid;
+ struct hw_info *hw = nic->hw;
+
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN88XX;
+ hw->chans_per_lmac = 16;
+ hw->chans_per_bgx = 128;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 4096;
+ hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 2;
+ hw->tl1_per_bgx = true;
+ break;
+ case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN81XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_rgx = 8;
+ hw->chans_per_lbk = 24;
+ hw->cpi_cnt = 512;
+ hw->rssi_cnt = 256;
+ hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 64;
+ hw->tl2_cnt = 16;
+ hw->tl1_cnt = 10;
+ hw->tl1_per_bgx = false;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN83XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_lbk = 64;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 1024;
+ hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 18;
+ hw->tl1_per_bgx = false;
+ break;
+ }
+ hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
+
+ /* Allocate memory for LMAC tracking elements */
+ max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
+ nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->vf_lmac_map)
+ goto error;
+ nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->link)
+ goto error;
+ nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->duplex)
+ goto error;
+ nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
+ if (!nic->speed)
+ goto error;
+ return 0;
+
+error:
+ nic_free_lmacmem(nic);
+ return -ENOMEM;
+}
+
#define BGX0_BLOCK 8
#define BGX1_BLOCK 9
-static void nic_init_hw(struct nicpf *nic)
+static int nic_init_hw(struct nicpf *nic)
{
- int i;
+ int i, err;
u64 cqm_cfg;
+ /* Get HW capability info */
+ err = nic_get_hw_info(nic);
+ if (err)
+ return err;
+
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* Disable TNS mode on both interfaces */
- nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
- nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ /* TNS and TNS bypass modes are present only on 88xx */
+ if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ }
+
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
(1ULL << 63) | BGX0_BLOCK);
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
@@ -351,11 +486,14 @@ static void nic_init_hw(struct nicpf *nic)
cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
+
+ return 0;
}
/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
+ struct hw_info *hw = nic->hw;
u32 vnic, bgx, lmac, chan;
u32 padd, cpi_count = 0;
u64 cpi_base, cpi, rssi_base, rssi;
@@ -365,9 +503,9 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
- chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
- cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
- rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+ rssi_base = vnic * hw->rss_ind_tbl_size;
/* Rx channel configuration */
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
@@ -439,7 +577,7 @@ static void nic_send_rss_size(struct nicpf *nic, int vf)
msg = (u64 *)&mbx;
mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
- mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -486,7 +624,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
 /* 4 level transmit side scheduler configuration
* for TNS bypass mode
*
- * Sample configuration for SQ0
+ * Sample configuration for SQ0 on 88xx
* VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
* VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
* VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
@@ -499,6 +637,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
struct sq_cfg_msg *sq)
{
+ struct hw_info *hw = nic->hw;
u32 bgx, lmac, chan;
u32 tl2, tl3, tl4;
u32 rr_quantum;
@@ -517,21 +656,28 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- if (!sq->sqs_mode) {
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
- } else {
- for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
- if (nic->vf_sqs[pqs_vnic][svf] == vnic)
- break;
+	/* For 88xx, TL4s 0-511 transmit via BGX0 and
+	 * TL4s 512-1023 transmit via BGX1.
+	 */
+ if (hw->tl1_per_bgx) {
+ tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+ if (!sq->sqs_mode) {
+ tl4 += (lmac * MAX_QUEUES_PER_QSET);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+ tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
+ tl4 += (svf * MAX_QUEUES_PER_QSET);
}
- tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
- tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
- tl4 += (svf * NIC_TL4_PER_LMAC);
- tl4 += (bgx * NIC_TL4_PER_BGX);
+ } else {
+ tl4 = (vnic * MAX_QUEUES_PER_QSET);
}
tl4 += sq_idx;
- tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) |
((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
@@ -539,8 +685,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
- chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
- nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+
+	/* On 88xx, channels 0-127 are for BGX0 and
+	 * channels 128-255 for BGX1.
+	 *
+	 * On 81xx/83xx the TL3_CHAN reg should be configured with the
+	 * channel within the LMAC, i.e. 0-7, and not the actual channel
+	 * number as on 88xx.
+	 */
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ if (hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ else
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
+
/* Enable backpressure on the channel */
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
@@ -549,6 +706,16 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
/* No priorities as of now */
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+	/* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest
+	 * to TL1 '1', on 81xx/83xx each TL2 needs to be configured to
+	 * transmit to one of the possible LMACs.
+	 *
+	 * This register doesn't exist on 88xx.
+	 */
+ if (!hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+ lmac + (bgx * MAX_LMAC_PER_BGX));
}
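
A worked example of the tl1_per_bgx branch above, assuming an 88xx with 128 total VFs (so tl4_cnt = 8 * 128 = 1024 and bgx_cnt = 2; the TotalVFs figure is an assumption for illustration):

	tl4 = bgx * (1024 / 2) + lmac * MAX_QUEUES_PER_QSET + sq_idx

	VNIC0-SQ0 (bgx0, lmac0): 0 * 512 + 0 * 8 + 0 = TL4(0)
	VNIC1-SQ0 (bgx0, lmac1): 0 * 512 + 1 * 8 + 0 = TL4(8)
	first BGX1 queue:        1 * 512 + 0 * 8 + 0 = TL4(512)

which matches both the SQ0 sample mapping in the comment block before nic_tx_channel_cfg() and the "TL4s 0-511 via BGX0, 512-1023 via BGX1" note.
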
/* Send primary nicvf pointer to secondary QS's VF */
@@ -620,7 +787,7 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
int bgx_idx, lmac_idx;
- if (lbk->vf_id > MAX_LMAC)
+ if (lbk->vf_id >= nic->num_vf_en)
return -1;
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
@@ -631,6 +798,67 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
return 0;
}
+/* Reset statistics counters */
+static int nic_reset_stat_counters(struct nicpf *nic,
+ int vf, struct reset_stat_cfg *cfg)
+{
+ int i, stat, qnum;
+ u64 reg_addr;
+
+ for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
+ if (cfg->rx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
+ if (cfg->tx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i <= 15; i++) {
+ qnum = i >> 1;
+ stat = i & 1 ? 1 : 0;
+ reg_addr = (vf << NIC_QS_ID_SHIFT) |
+ (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
+ if (cfg->rq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ if (cfg->sq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+ return 0;
+}
+
+static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
+{
+ u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
+ u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
+ (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;
+
+ /* Configure tunnel parsing parameters */
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
+ (1ULL << 63 | UDP_GENEVE_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
+ ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
+ ((0xfULL << 60) | vxlan_prot_def));
+}
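
The packed protocol words are easy to sanity-check numerically with the values defined in nic_reg.h (IPV6_PROT 0x86DD, IPV4_PROT 0x800, ET_PROT 0x6558, and the *_DEF variants 0x2/0x1/0x3):

	prot_def       = (0x86DDULL << 32) | (0x800ULL << 16) | 0x6558ULL
	               = 0x86DD08006558ULL
	vxlan_prot_def = (0x2ULL << 32) | (0x1ULL << 16) | 0x3ULL
	               = 0x000200010003ULL
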
+
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
int bgx, lmac;
@@ -669,18 +897,17 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx_addr += sizeof(u64);
}
- dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
__func__, mbx.msg.msg, vf);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- if (vf < MAX_LMAC) {
+ if (vf < nic->num_vf_en) {
nic->link[vf] = 0;
nic->duplex[vf] = 0;
nic->speed[vf] = 0;
}
- ret = 1;
- break;
+ goto unlock;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -698,6 +925,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+			/* Enable CQE_RX2_S extension in CQE_RX descriptor.
+			 * It is appended by default on 81xx/83xx chips; for
+			 * consistency, enable the same on 88xx pass2, where
+			 * it was introduced.
+			 */
+ if (pass2_silicon(nic->pdev))
+ nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+ if (!pass1_silicon(nic->pdev))
+ nic_enable_tunnel_parsing(nic, vf);
break;
case NIC_MBOX_MSG_RQ_BP_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
@@ -722,8 +958,10 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
break;
case NIC_MBOX_MSG_SET_MAC:
- if (vf >= nic->num_vf_en)
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
break;
+ }
lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -772,25 +1010,38 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
+ case NIC_MBOX_MSG_RESET_STAT_COUNTER:
+ ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
break;
}
- if (!ret)
+ if (!ret) {
nic_mbx_send_ack(nic, vf);
- else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
+ dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
+ mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
+ }
unlock:
nic->mbx_lock[vf] = false;
}
-static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
+static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+ int mbx;
u64 intr;
u8 vf, vf_per_mbx_reg = 64;
+ if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+ mbx = 0;
+ else
+ mbx = 1;
+
intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
for (vf = 0; vf < vf_per_mbx_reg; vf++) {
@@ -802,23 +1053,6 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
nic_clear_mbx_intr(nic, vf, mbx);
}
}
-}
-
-static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq)
-{
- struct nicpf *nic = (struct nicpf *)nic_irq;
-
- nic_mbx_intr_handler(nic, 0);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq)
-{
- struct nicpf *nic = (struct nicpf *)nic_irq;
-
- nic_mbx_intr_handler(nic, 1);
-
return IRQ_HANDLED;
}
@@ -826,7 +1060,13 @@ static int nic_enable_msix(struct nicpf *nic)
{
int i, ret;
- nic->num_vec = NIC_PF_MSIX_VECTORS;
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+
+ nic->msix_entries = kmalloc_array(nic->num_vec,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic->msix_entries)
+ return -ENOMEM;
for (i = 0; i < nic->num_vec; i++)
nic->msix_entries[i].entry = i;
@@ -834,8 +1074,9 @@ static int nic_enable_msix(struct nicpf *nic)
ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
if (ret) {
dev_err(&nic->pdev->dev,
- "Request for #%d msix vectors failed\n",
- nic->num_vec);
+ "Request for #%d msix vectors failed, returned %d\n",
+ nic->num_vec, ret);
+ kfree(nic->msix_entries);
return ret;
}
@@ -847,6 +1088,7 @@ static void nic_disable_msix(struct nicpf *nic)
{
if (nic->msix_enabled) {
pci_disable_msix(nic->pdev);
+ kfree(nic->msix_entries);
nic->msix_enabled = 0;
nic->num_vec = 0;
}
@@ -865,27 +1107,26 @@ static void nic_free_all_interrupts(struct nicpf *nic)
static int nic_register_interrupts(struct nicpf *nic)
{
- int ret;
+ int i, ret;
/* Enable MSI-X */
ret = nic_enable_msix(nic);
if (ret)
return ret;
- /* Register mailbox interrupt handlers */
- ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
- nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
- if (ret)
- goto fail;
-
- nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+ /* Register mailbox interrupt handler */
+ for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
+ sprintf(nic->irq_name[i],
+ "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
- nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
- if (ret)
- goto fail;
+ ret = request_irq(nic->msix_entries[i].vector,
+ nic_mbx_intr_handler, 0,
+ nic->irq_name[i], nic);
+ if (ret)
+ goto fail;
- nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+ nic->irq_allocated[i] = true;
+ }
/* Enable mailbox interrupt */
nic_enable_mbx_intr(nic);
@@ -894,6 +1135,7 @@ static int nic_register_interrupts(struct nicpf *nic)
fail:
dev_err(&nic->pdev->dev, "Request irq failed\n");
nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
return ret;
}
@@ -908,6 +1150,12 @@ static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
u16 total_vf;
+	/* Secondary Qsets are needed only if the CPU count is
+	 * more than MAX_QUEUES_PER_QSET.
+	 */
+ if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
+ return 0;
+
/* Check if its a multi-node environment */
if (nr_node_ids > 1)
sqs_per_vf = MAX_SQS_PER_VF;
@@ -1013,6 +1261,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!nic)
return -ENOMEM;
+ nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
+ if (!nic->hw) {
+ devm_kfree(dev, nic);
+ return -ENOMEM;
+ }
+
pci_set_drvdata(pdev, nic);
nic->pdev = pdev;
@@ -1052,13 +1306,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->node = nic_get_node_id(pdev);
- nic_set_lmac_vf_mapping(nic);
-
/* Initialize hardware */
- nic_init_hw(nic);
+ err = nic_init_hw(nic);
+ if (err)
+ goto err_release_regions;
- /* Set RSS TBL size for each VF */
- nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ nic_set_lmac_vf_mapping(nic);
/* Register interrupts */
err = nic_register_interrupts(nic);
@@ -1091,6 +1344,9 @@ err_unregister_interrupts:
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
+ nic_free_lmacmem(nic);
+ devm_kfree(dev, nic->hw);
+ devm_kfree(dev, nic);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
@@ -1111,6 +1367,11 @@ static void nic_remove(struct pci_dev *pdev)
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
+
+ nic_free_lmacmem(nic);
+ devm_kfree(&pdev->dev, nic->hw);
+ devm_kfree(&pdev->dev, nic);
+
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index fab35a593898..edf779f5a227 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -36,6 +36,20 @@
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_RX_GENEVE_DEF (0x0580)
+#define UDP_GENEVE_PORT_NUM 0x17C1ULL
+#define NIC_PF_RX_GENEVE_PROT_DEF (0x0588)
+#define IPV6_PROT 0x86DDULL
+#define IPV4_PROT 0x800ULL
+#define ET_PROT 0x6558ULL
+#define NIC_PF_RX_NVGRE_PROT_DEF (0x0598)
+#define NIC_PF_RX_VXLAN_DEF_0_1 (0x05A0)
+#define UDP_VXLAN_PORT_NUM 0x12B5
+#define NIC_PF_RX_VXLAN_PROT_DEF (0x05B0)
+#define IPV6_PROT_DEF 0x2ULL
+#define IPV4_PROT_DEF 0x1ULL
+#define ET_PROT_DEF 0x3ULL
+#define NIC_PF_RX_CFG (0x05D0)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
@@ -103,6 +117,7 @@
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_LMAC (0x540000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
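
The magic UDP port constants above decode to the IANA-assigned tunnel ports: 0x17C1 is 6081 (Geneve) and 0x12B5 is 4789 (VXLAN). A trivial standalone check:

	#include <assert.h>

	int main(void)
	{
		assert(0x17C1 == 6081);	/* IANA Geneve UDP port */
		assert(0x12B5 == 4789);	/* IANA VXLAN UDP port */
		return 0;
	}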
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 3240349615bd..45a13f718863 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -29,10 +29,20 @@
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA134) },
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_NIC_VF) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_81XX_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_83XX_NIC_VF) },
{ 0, } /* end of table */
};
@@ -134,15 +144,19 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
- if (nic->pf_nacked)
+ if (nic->pf_nacked) {
+ netdev_err(nic->netdev,
+ "PF NACK to mbox msg 0x%02x from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
return -EINVAL;
+ }
msleep(sleep);
if (nic->pf_acked)
break;
timeout -= sleep;
if (!timeout) {
netdev_err(nic->netdev,
- "PF didn't ack to mbox msg %d from VF%d\n",
+ "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
return -EBUSY;
}
@@ -352,13 +366,7 @@ static int nicvf_rss_init(struct nicvf *nic)
rss->enable = true;
- /* Using the HW reset value for now */
- rss->key[0] = 0xFEED0BADFEED0BADULL;
- rss->key[1] = 0xFEED0BADFEED0BADULL;
- rss->key[2] = 0xFEED0BADFEED0BADULL;
- rss->key[3] = 0xFEED0BADFEED0BADULL;
- rss->key[4] = 0xFEED0BADFEED0BADULL;
-
+ netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
@@ -507,7 +515,9 @@ static int nicvf_init_resources(struct nicvf *nic)
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cmp_queue *cq,
- struct cqe_send_t *cqe_tx, int cqe_type)
+ struct cqe_send_t *cqe_tx,
+ int cqe_type, int budget,
+ unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -538,7 +548,9 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
- dev_consume_skb_any(skb);
+ (*tx_pkts)++;
+ *tx_bytes += skb->len;
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -653,6 +665,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
+ unsigned int tx_pkts = 0, tx_bytes = 0;
spin_lock_bh(&cq->lock);
loop:
@@ -691,7 +704,8 @@ loop:
break;
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
- (void *)cq_desc, CQE_TYPE_SEND);
+ (void *)cq_desc, CQE_TYPE_SEND,
+ budget, &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -720,6 +734,9 @@ done:
netdev = nic->pnicvf->netdev;
txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
+ if (tx_pkts)
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
@@ -933,16 +950,19 @@ static int nicvf_register_interrupts(struct nicvf *nic)
int vector;
for_each_cq_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
- nic->vf_id, irq);
+ sprintf(nic->irq_name[irq], "%s-rxtx-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq));
for_each_sq_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
- nic->vf_id, irq - NICVF_INTR_ID_SQ);
+ sprintf(nic->irq_name[irq], "%s-sq-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
for_each_rbdr_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
- nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+ sprintf(nic->irq_name[irq], "%s-rbdr-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
/* Register CQ interrupts */
for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
@@ -966,8 +986,9 @@ static int nicvf_register_interrupts(struct nicvf *nic)
}
/* Register QS error interrupt */
- sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
- "NICVF%d Qset error", nic->vf_id);
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
irq = NICVF_INTR_ID_QS_ERR;
ret = request_irq(nic->msix_entries[irq].vector,
nicvf_qs_err_intr_handler,
@@ -1146,6 +1167,9 @@ int nicvf_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
@@ -1196,7 +1220,7 @@ int nicvf_open(struct net_device *netdev)
}
/* Check if we got a MAC address from PF or else generate a random MAC */
- if (is_zero_ether_addr(netdev->dev_addr)) {
+ if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
eth_hw_addr_random(netdev);
nicvf_hw_set_mac_addr(nic, netdev);
}
@@ -1533,14 +1557,13 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
- qcount = MAX_CMP_QUEUES_PER_QS;
+ qcount = netif_get_num_default_rss_queues();
/* Restrict multiqset support only for host bound VFs */
if (pdev->is_virtfn) {
/* Set max number of queues per VF */
- qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
- qcount = min(qcount,
- (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+ qcount = min_t(int, num_online_cpus(),
+ (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
}
netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
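
The nicvf changes in this file wire up byte queue limits (BQL): netdev_tx_completed_queue() in the CQ handler, netdev_tx_reset_queue() in nicvf_stop(), and netdev_tx_sent_queue() on the xmit side (added to nicvf_queues.c below). The generic contract, sketched rather than quoted from the driver:

	/* xmit side: account bytes handed to hardware */
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	/* completion side: balance the accounting, batched per poll */
	if (tx_pkts)
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	/* teardown: drop in-flight state, else the queue stays throttled */
	netdev_tx_reset_queue(txq);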
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index dda3ea3f3bb6..a4fc50155881 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -479,6 +479,16 @@ void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}
+static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ /* Reset all RXQ's stats */
+ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable)
@@ -762,10 +772,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
nic->qs = qs;
/* Set count of each queue */
- qs->rbdr_cnt = RBDR_CNT;
- qs->rq_cnt = RCV_QUEUE_CNT;
- qs->sq_cnt = SND_QUEUE_CNT;
- qs->cq_cnt = CMP_QUEUE_CNT;
+ qs->rbdr_cnt = DEFAULT_RBDR_CNT;
+ qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
+ qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
+ qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
/* Set queue lengths */
qs->rbdr_len = RCV_BUF_COUNT;
@@ -812,6 +822,11 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
nicvf_free_resources(nic);
}
+ /* Reset RXQ's stats.
+ * SQ's stats will get reset automatically once SQ is reset.
+ */
+ nicvf_reset_rcv_queue_stats(nic);
+
return 0;
}
@@ -1067,6 +1082,24 @@ static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
imm->len = 1;
}
+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+ int sq_num, int desc_cnt)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+ skb_get_queue_mapping(skb));
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, desc_cnt);
+}
+
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1126,12 +1159,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Save SKB in the last segment for freeing */
sq->skbuff[hdr_qentry] = (u64)skb;
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- /* Inform HW to xmit all TSO segments */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, desc_cnt);
nic->drv_stats.tx_tso++;
return 1;
}
@@ -1204,12 +1233,8 @@ doorbell:
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
}
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
- /* Inform HW to xmit new packet */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, subdesc_cnt);
return 1;
append_fail:
@@ -1234,13 +1259,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int frag;
int payload_len = 0;
struct sk_buff *skb = NULL;
- struct sk_buff *skb_frag = NULL;
- struct sk_buff *prev_frag = NULL;
+ struct page *page;
+ int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
- rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ /* Except on 88xx pass1, CQE_RX2_S is added to CQE_RX at word6,
+ * hence the buffer pointers move up by one word.
+ *
+ * Use the existing 'hw_tso' flag, which is set for all chips
+ * except 88xx pass1, instead of an additional cache line
+ * access (or miss) from reading the PCI dev's revision.
+ */
+ if (!nic->hw_tso)
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ else
+ rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
__func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
@@ -1258,22 +1293,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
- payload_len);
- if (!skb_frag) {
- dev_kfree_skb(skb);
- return NULL;
- }
-
- if (!skb_shinfo(skb)->frag_list)
- skb_shinfo(skb)->frag_list = skb_frag;
- else
- prev_frag->next = skb_frag;
-
- prev_frag = skb_frag;
- skb->len += payload_len;
- skb->data_len += payload_len;
- skb_frag->len = payload_len;
+ page = virt_to_page(phys_to_virt(*rb_ptrs));
+ offset = phys_to_virt(*rb_ptrs) - page_address(page);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset, payload_len, RCV_FRAG_LEN);
}
/* Next buffer pointer */
rb_ptrs++;
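
One accounting note on the skb_add_rx_frag() conversion above: the last argument is the buffer's truesize for socket memory accounting, and the driver charges the whole RCV_FRAG_LEN chunk even when payload_len is smaller. The upstream signature, for reference:

	void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			     int off, int size, unsigned int truesize);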
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 6673e1133523..869f3386028b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -57,10 +57,7 @@
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
/* Default queue count per QS, its lengths and threshold values */
-#define RBDR_CNT 1
-#define RCV_QUEUE_CNT 8
-#define SND_QUEUE_CNT 8
-#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+#define DEFAULT_RBDR_CNT 1
#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 63a39ac97d53..8bbaedbb7b94 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -28,6 +28,9 @@ struct lmac {
struct bgx *bgx;
int dmac;
u8 mac[ETH_ALEN];
+ u8 lmac_type;
+ u8 lane_to_sds;
+ bool use_training;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -43,14 +46,13 @@ struct lmac {
struct bgx {
u8 bgx_id;
- u8 qlm_mode;
struct lmac lmac[MAX_LMAC_PER_BGX];
int lmac_count;
- int lmac_type;
- int lane_to_sds;
- int use_training;
+ u8 max_lmac;
void __iomem *reg_base;
struct pci_dev *pdev;
+ bool is_dlm;
+ bool is_rgx;
};
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
@@ -61,6 +63,7 @@ static int bgx_xaui_check_link(struct lmac *lmac);
/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
{ 0, } /* end of table */
};
@@ -124,8 +127,8 @@ unsigned bgx_get_map(int node)
int i;
unsigned map = 0;
- for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
- if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ for (i = 0; i < MAX_BGX_PER_NODE; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
map |= (1 << i);
}
@@ -138,7 +141,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (bgx)
return bgx->lmac_count;
@@ -153,7 +156,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -166,7 +169,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (bgx)
return bgx->lmac[lmacid].mac;
@@ -177,7 +180,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -188,11 +191,13 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct lmac *lmac;
u64 cfg;
if (!bgx)
return;
+ lmac = &bgx->lmac[lmacid];
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
if (enable)
@@ -200,6 +205,9 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
else
cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ if (bgx->is_rgx)
+ xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
@@ -266,9 +274,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
- /* renable lmac */
+ /* Re-enable lmac */
cmr_cfg |= CMR_EN;
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
+ xcv_setup_link(lmac->link_up, lmac->last_speed);
}
static void bgx_lmac_handler(struct net_device *netdev)
@@ -314,7 +325,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return 0;
@@ -328,7 +339,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return 0;
@@ -356,7 +367,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -379,8 +390,9 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
-static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
+ int lmacid = lmac->lmacid;
u64 cfg;
bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
@@ -409,18 +421,29 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
- if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
- PCS_MRX_STATUS_AN_CPT, false)) {
- dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
- return -1;
+ if (lmac->lmac_type == BGX_MODE_QSGMII) {
+ /* Disable disparity check for QSGMII */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+ cfg &= ~PCS_MISC_CTL_DISP_EN;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+ return 0;
+ }
+
+ if (lmac->lmac_type == BGX_MODE_SGMII) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
}
return 0;
}
-static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
u64 cfg;
+ int lmacid = lmac->lmacid;
/* Reset SPU */
bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
@@ -436,12 +459,14 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
/* Set interleaved running disparity for RXAUI */
- if (bgx->lmac_type != BGX_MODE_RXAUI)
- bgx_reg_modify(bgx, lmacid,
- BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
- else
+ if (lmac->lmac_type == BGX_MODE_RXAUI)
bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
- SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+ SPU_MISC_CTL_INTLV_RDISP);
+
+ /* Clear receive packet disable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
/* clear all interrupts */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
@@ -451,7 +476,7 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
- if (bgx->use_training) {
+ if (lmac->use_training) {
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
@@ -474,9 +499,9 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
- if (bgx->lmac_type == BGX_MODE_10G_KR)
+ if (lmac->lmac_type == BGX_MODE_10G_KR)
cfg |= (1 << 23);
- else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ else if (lmac->lmac_type == BGX_MODE_40G_KR)
cfg |= (1 << 24);
else
cfg &= ~((1 << 23) | (1 << 24));
@@ -511,11 +536,10 @@ static int bgx_xaui_check_link(struct lmac *lmac)
{
struct bgx *bgx = lmac->bgx;
int lmacid = lmac->lmacid;
- int lmac_type = bgx->lmac_type;
+ int lmac_type = lmac->lmac_type;
u64 cfg;
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
- if (bgx->use_training) {
+ if (lmac->use_training) {
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
if (!(cfg & (1ull << 13))) {
cfg = (1ull << 13) | (1ull << 14);
@@ -556,7 +580,7 @@ static int bgx_xaui_check_link(struct lmac *lmac)
BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
- if (bgx->use_training) {
+ if (lmac->use_training) {
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
if (!(cfg & (1ull << 13))) {
cfg = (1ull << 13) | (1ull << 14);
@@ -584,11 +608,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Clear receive packet disable */
- cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
- cfg &= ~SPU_MISC_CTL_RX_DIS;
- bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-
/* Check for MAC RX faults */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
@@ -599,7 +618,7 @@ static int bgx_xaui_check_link(struct lmac *lmac)
/* Rx local/remote fault seen.
* Do lmac reinit to see if condition recovers
*/
- bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+ bgx_lmac_xaui_init(bgx, lmac);
return -1;
}
@@ -623,7 +642,7 @@ static void bgx_poll_for_link(struct work_struct *work)
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
- if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ if (lmac->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
else
lmac->last_speed = 10000;
@@ -649,6 +668,16 @@ static void bgx_poll_for_link(struct work_struct *work)
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
+static int phy_interface_mode(u8 lmac_type)
+{
+ if (lmac_type == BGX_MODE_QSGMII)
+ return PHY_INTERFACE_MODE_QSGMII;
+ if (lmac_type == BGX_MODE_RGMII)
+ return PHY_INTERFACE_MODE_RGMII;
+
+ return PHY_INTERFACE_MODE_SGMII;
+}
+
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
@@ -657,13 +686,15 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
- if (bgx->lmac_type == BGX_MODE_SGMII) {
+ if ((lmac->lmac_type == BGX_MODE_SGMII) ||
+ (lmac->lmac_type == BGX_MODE_QSGMII) ||
+ (lmac->lmac_type == BGX_MODE_RGMII)) {
lmac->is_sgmii = 1;
- if (bgx_lmac_sgmii_init(bgx, lmacid))
+ if (bgx_lmac_sgmii_init(bgx, lmac))
return -1;
} else {
lmac->is_sgmii = 0;
- if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ if (bgx_lmac_xaui_init(bgx, lmac))
return -1;
}
@@ -685,10 +716,10 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
/* Restore default cfg, in case low level firmware changed it */
bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
- if ((bgx->lmac_type != BGX_MODE_XFI) &&
- (bgx->lmac_type != BGX_MODE_XLAUI) &&
- (bgx->lmac_type != BGX_MODE_40G_KR) &&
- (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR)) {
if (!lmac->phydev)
return -ENODEV;
@@ -696,7 +727,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
bgx_lmac_handler,
- PHY_INTERFACE_MODE_SGMII))
+ phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
phy_start_aneg(lmac->phydev);
@@ -753,76 +784,19 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
bgx_flush_dmac_addrs(bgx, lmacid);
- if ((bgx->lmac_type != BGX_MODE_XFI) &&
- (bgx->lmac_type != BGX_MODE_XLAUI) &&
- (bgx->lmac_type != BGX_MODE_40G_KR) &&
- (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
lmac->phydev = NULL;
}
-static void bgx_set_num_ports(struct bgx *bgx)
-{
- u64 lmac_count;
-
- switch (bgx->qlm_mode) {
- case QLM_MODE_SGMII:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_SGMII;
- bgx->lane_to_sds = 0;
- break;
- case QLM_MODE_XAUI_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_XAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_RXAUI_2X2:
- bgx->lmac_count = 2;
- bgx->lmac_type = BGX_MODE_RXAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_XFI_4X1:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_XFI;
- bgx->lane_to_sds = 0;
- break;
- case QLM_MODE_XLAUI_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_XLAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_10G_KR_4X1:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_10G_KR;
- bgx->lane_to_sds = 0;
- bgx->use_training = 1;
- break;
- case QLM_MODE_40G_KR4_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_40G_KR;
- bgx->lane_to_sds = 0xE4;
- bgx->use_training = 1;
- break;
- default:
- bgx->lmac_count = 0;
- break;
- }
-
- /* Check if low level firmware has programmed LMAC count
- * based on board type, if yes consider that otherwise
- * the default static values
- */
- lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
- if (lmac_count != 4)
- bgx->lmac_count = lmac_count;
-}
-
static void bgx_init_hw(struct bgx *bgx)
{
int i;
-
- bgx_set_num_ports(bgx);
+ struct lmac *lmac;
bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
@@ -830,17 +804,9 @@ static void bgx_init_hw(struct bgx *bgx)
/* Set lmac type and lane2serdes mapping */
for (i = 0; i < bgx->lmac_count; i++) {
- if (bgx->lmac_type == BGX_MODE_RXAUI) {
- if (i)
- bgx->lane_to_sds = 0x0e;
- else
- bgx->lane_to_sds = 0x04;
- bgx_reg_write(bgx, i, BGX_CMRX_CFG,
- (bgx->lmac_type << 8) | bgx->lane_to_sds);
- continue;
- }
+ lmac = &bgx->lmac[i];
bgx_reg_write(bgx, i, BGX_CMRX_CFG,
- (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ (lmac->lmac_type << 8) | lmac->lane_to_sds);
bgx->lmac[i].lmacid_bd = lmac_count;
lmac_count++;
}
@@ -863,55 +829,212 @@ static void bgx_init_hw(struct bgx *bgx)
bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
-static void bgx_get_qlm_mode(struct bgx *bgx)
+static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
+{
+ return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
+}
+
+static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
struct device *dev = &bgx->pdev->dev;
- int lmac_type;
- int train_en;
+ struct lmac *lmac;
+ char str[20];
+ u8 dlm;
- /* Read LMAC0 type to figure out QLM mode
- * This is configured by low level firmware
- */
- lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
- lmac_type = (lmac_type >> 8) & 0x07;
+ if (lmacid > bgx->max_lmac)
+ return;
- train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
- SPU_PMD_CRTL_TRAIN_EN;
+ lmac = &bgx->lmac[lmacid];
+ dlm = (lmacid / 2) + (bgx->bgx_id * 2);
+ if (!bgx->is_dlm)
+ sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
+ else
+ sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
- switch (lmac_type) {
+ switch (lmac->lmac_type) {
case BGX_MODE_SGMII:
- bgx->qlm_mode = QLM_MODE_SGMII;
- dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ dev_info(dev, "%s: SGMII\n", (char *)str);
break;
case BGX_MODE_XAUI:
- bgx->qlm_mode = QLM_MODE_XAUI_1X4;
- dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ dev_info(dev, "%s: XAUI\n", (char *)str);
break;
case BGX_MODE_RXAUI:
- bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
- dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ dev_info(dev, "%s: RXAUI\n", (char *)str);
break;
case BGX_MODE_XFI:
- if (!train_en) {
- bgx->qlm_mode = QLM_MODE_XFI_4X1;
- dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
- } else {
- bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
- dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
- }
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XFI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 10G_KR\n", (char *)str);
break;
case BGX_MODE_XLAUI:
- if (!train_en) {
- bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
- dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
- } else {
- bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
- dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
- }
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XLAUI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 40G_KR4\n", (char *)str);
+ break;
+ case BGX_MODE_QSGMII:
+ if ((lmacid == 0) &&
+ (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
+ return;
+ if ((lmacid == 2) &&
+ (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
+ return;
+ dev_info(dev, "%s: QSGMII\n", (char *)str);
+ break;
+ case BGX_MODE_RGMII:
+ dev_info(dev, "%s: RGMII\n", (char *)str);
+ break;
+ case BGX_MODE_INVALID:
+ /* Nothing to do */
+ break;
+ }
+}
+
+static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
+{
+ switch (lmac->lmac_type) {
+ case BGX_MODE_SGMII:
+ case BGX_MODE_XFI:
+ lmac->lane_to_sds = lmac->lmacid;
+ break;
+ case BGX_MODE_XAUI:
+ case BGX_MODE_XLAUI:
+ case BGX_MODE_RGMII:
+ lmac->lane_to_sds = 0xE4;
+ break;
+ case BGX_MODE_RXAUI:
+ lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
+ break;
+ case BGX_MODE_QSGMII:
+ /* There is no way to determine if DLM0/2 is QSGMII or
+ * DLM1/3 is configured to QSGMII, as the bootloader will
+ * configure all LMACs, so take whatever is configured
+ * by low level firmware.
+ */
+ lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
break;
default:
- bgx->qlm_mode = QLM_MODE_SGMII;
- dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ lmac->lane_to_sds = 0;
+ break;
+ }
+}
+
+static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
+{
+ if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR)) {
+ lmac->use_training = 0;
+ return;
+ }
+
+ lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+}
+
+static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+{
+ struct lmac *lmac;
+ struct lmac *olmac;
+ u64 cmr_cfg;
+ u8 lmac_type;
+ u8 lane_to_sds;
+
+ lmac = &bgx->lmac[idx];
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
+ if (bgx->is_rgx)
+ lmac->lmac_type = BGX_MODE_RGMII;
+ lmac_set_training(bgx, lmac, 0);
+ lmac_set_lane2sds(bgx, lmac);
+ return;
+ }
+
+ /* On 81xx a BGX can be split across 2 DLMs;
+ * firmware programs the lmac_type of LMAC0 and LMAC2.
+ */
+ if ((idx == 0) || (idx == 2)) {
+ cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ /* Check if config is not reset value */
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+ lmac->lmac_type = BGX_MODE_INVALID;
+ else
+ lmac->lmac_type = lmac_type;
+ lmac_set_training(bgx, lmac, lmac->lmacid);
+ lmac_set_lane2sds(bgx, lmac);
+
+ /* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */
+ olmac = &bgx->lmac[idx + 1];
+ olmac->lmac_type = lmac->lmac_type;
+ lmac_set_training(bgx, olmac, olmac->lmacid);
+ lmac_set_lane2sds(bgx, olmac);
+ }
+}
+
+static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+
+ if (!bgx->is_dlm)
+ return true;
+
+ lmac = &bgx->lmac[0];
+ if (lmac->lmac_type == BGX_MODE_INVALID)
+ return false;
+
+ return true;
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+ struct lmac *lmac01;
+ struct lmac *lmac23;
+ u8 idx;
+
+ /* Init all LMAC's type to invalid */
+ for (idx = 0; idx < bgx->max_lmac; idx++) {
+ lmac = &bgx->lmac[idx];
+ lmac->lmacid = idx;
+ lmac->lmac_type = BGX_MODE_INVALID;
+ lmac->use_training = false;
+ }
+
+ /* It is assumed that low level firmware sets this value */
+ bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (bgx->lmac_count > bgx->max_lmac)
+ bgx->lmac_count = bgx->max_lmac;
+
+ for (idx = 0; idx < bgx->max_lmac; idx++)
+ bgx_set_lmac_config(bgx, idx);
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ bgx_print_qlm_mode(bgx, 0);
+ return;
+ }
+
+ if (bgx->lmac_count) {
+ bgx_print_qlm_mode(bgx, 0);
+ bgx_print_qlm_mode(bgx, 2);
+ }
+
+ /* If DLM0 is not in BGX mode then LMAC0/1 have
+ * to be configured with serdes lanes of DLM1
+ */
+ if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
+ return;
+ for (idx = 0; idx < bgx->lmac_count; idx++) {
+ lmac01 = &bgx->lmac[idx];
+ lmac23 = &bgx->lmac[idx + 2];
+ lmac01->lmac_type = lmac23->lmac_type;
+ lmac01->lane_to_sds = lmac23->lane_to_sds;
}
}
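
The lane_to_sds values assigned in lmac_set_lane2sds() pack one 2-bit SerDes index per LMAC lane, lane 0 in the low bits. A worked decoding, assuming that layout:

	/* 0xE4 = 11 10 01 00b -> lanes 0..3 on serdes 0,1,2,3 (identity)
	 * 0x04 = 00 00 01 00b -> RXAUI LMAC0 pairs serdes 0 and 1
	 * 0x0E = 00 00 11 10b -> RXAUI LMAC1 pairs serdes 2 and 3
	 */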
@@ -1042,7 +1165,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
lmac++;
- if (lmac == MAX_LMAC_PER_BGX) {
+ if (lmac == bgx->max_lmac) {
of_node_put(node);
break;
}
@@ -1087,6 +1210,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct bgx *bgx = NULL;
u8 lmac;
+ u16 sdevid;
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
if (!bgx)
@@ -1115,10 +1239,30 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto err_release_regions;
}
- bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
- bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
- bgx_vnic[bgx->bgx_id] = bgx;
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
+ if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
+ bgx->bgx_id =
+ (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+ bgx->max_lmac = MAX_LMAC_PER_BGX;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ } else {
+ bgx->is_rgx = true;
+ bgx->max_lmac = 1;
+ bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ xcv_init_hw();
+ }
+
+ /* On 81xx all are DLMs, and on 83xx there are 3 BGX QLMs and one
+ * BGX, i.e. BGX2, which can be split across 2 DLMs.
+ */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
+ ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
+ bgx->is_dlm = true;
+
bgx_get_qlm_mode(bgx);
err = bgx_init_phy(bgx);
@@ -1133,6 +1277,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(dev, "BGX%d failed to enable lmac%d\n",
bgx->bgx_id, lmac);
+ while (lmac)
+ bgx_lmac_disable(bgx, --lmac);
goto err_enable;
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 42010d2e5ddf..d59c71e4a000 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -9,8 +9,20 @@
#ifndef THUNDER_BGX_H
#define THUNDER_BGX_H
-#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+/* PCI device ID */
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+#define PCI_DEVICE_ID_THUNDER_RGX 0xA054
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126
+#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226
+#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326
+
+#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
#define MAX_BGX_PER_CN88XX 2
+#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
+#define MAX_BGX_PER_CN83XX 4
+#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
@@ -18,8 +30,6 @@
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
-#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
-
/* Registers */
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN BIT_ULL(13)
@@ -136,6 +146,7 @@
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
#define BGX_GMP_GMI_PRTX_CFG 0x38020
@@ -194,6 +205,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
@@ -213,16 +227,9 @@ enum LMAC_TYPE {
BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
-};
-
-enum qlm_mode {
- QLM_MODE_SGMII, /* SGMII, each lane independent */
- QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
- QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
- QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
- QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
- QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
- QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+ BGX_MODE_RGMII = 5,
+ BGX_MODE_QSGMII = 6,
+ BGX_MODE_INVALID = 7,
};
#endif /* THUNDER_BGX_H */
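
Note the deliberate aliasing in LMAC_TYPE above: BGX_MODE_XLAUI and BGX_MODE_40G_KR are both 4 (and the 10G XFI/KR pair collides the same way), because the hardware type field cannot tell them apart. The per-LMAC training-enable bit is the tie-breaker, as lmac_set_training() in thunder_bgx.c reads it:

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
			     SPU_PMD_CRTL_TRAIN_EN;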
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
new file mode 100644
index 000000000000..67befedef709
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-xcv"
+#define DRV_VERSION "1.0"
+
+/* Register offsets */
+#define XCV_RESET 0x00
+#define PORT_EN BIT_ULL(63)
+#define CLK_RESET BIT_ULL(15)
+#define DLL_RESET BIT_ULL(11)
+#define COMP_EN BIT_ULL(7)
+#define TX_PKT_RESET BIT_ULL(3)
+#define TX_DATA_RESET BIT_ULL(2)
+#define RX_PKT_RESET BIT_ULL(1)
+#define RX_DATA_RESET BIT_ULL(0)
+#define XCV_DLL_CTL 0x10
+#define CLKRX_BYP BIT_ULL(23)
+#define CLKTX_BYP BIT_ULL(15)
+#define XCV_COMP_CTL 0x20
+#define DRV_BYP BIT_ULL(63)
+#define XCV_CTL 0x30
+#define XCV_INT 0x40
+#define XCV_INT_W1S 0x48
+#define XCV_INT_ENA_W1C 0x50
+#define XCV_INT_ENA_W1S 0x58
+#define XCV_INBND_STATUS 0x80
+#define XCV_BATCH_CRD_RET 0x100
+
+struct xcv {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct xcv *xcv;
+
+/* Supported devices */
+static const struct pci_device_id xcv_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, xcv_id_table);
+
+void xcv_init_hw(void)
+{
+ u64 cfg;
+
+ /* Take DLL out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~DLL_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Take clock tree out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ /* Wait for DLL to lock */
+ msleep(1);
+
+ /* Configure DLL - enable or bypass
+ * TX no bypass, RX bypass
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
+ cfg &= ~0xFF03;
+ cfg |= CLKRX_BYP;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);
+
+ /* Enable compensation controller and force the
+ * write to be visible to HW by reading back.
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= COMP_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ /* Wait for compensation state machine to lock */
+ msleep(10);
+
+ /* enable the XCV block */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= PORT_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+}
+EXPORT_SYMBOL(xcv_init_hw);
+
+void xcv_setup_link(bool link_up, int link_speed)
+{
+ u64 cfg;
+ int speed = 2;
+
+ if (!xcv) {
+ pr_err("XCV init not done, probe may have failed\n");
+ return;
+ }
+
+ if (link_speed == 100)
+ speed = 1;
+ else if (link_speed == 10)
+ speed = 0;
+
+ if (link_up) {
+ /* set operating speed */
+ cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
+ cfg &= ~0x03;
+ cfg |= speed;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);
+
+ /* Reset datapaths */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_DATA_RESET | RX_DATA_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Enable the packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_PKT_RESET | RX_PKT_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Return credits to RGX */
+ writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
+ } else {
+ /* Disable packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ }
+}
+EXPORT_SYMBOL(xcv_setup_link);
+
+static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL);
+ if (!xcv)
+ return -ENOMEM;
+ xcv->pdev = pdev;
+
+ pci_set_drvdata(pdev, xcv);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_kfree;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!xcv->reg_base) {
+ dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_kfree:
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ return err;
+}
+
+static void xcv_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (xcv) {
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver xcv_driver = {
+ .name = DRV_NAME,
+ .id_table = xcv_id_table,
+ .probe = xcv_probe,
+ .remove = xcv_remove,
+};
+
+static int __init xcv_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&xcv_driver);
+}
+
+static void __exit xcv_cleanup_module(void)
+{
+ pci_unregister_driver(&xcv_driver);
+}
+
+module_init(xcv_init_module);
+module_exit(xcv_cleanup_module);
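
xcv_init_hw() and xcv_setup_link() follow each writeq_relaxed() that must land before a delay with a readq_relaxed() of the same register; relaxed MMIO writes are posted, so the read-back flushes them to the device. The pattern, as a generic sketch:

	writeq_relaxed(cfg, base + reg);	/* posted; may sit in a buffer */
	readq_relaxed(base + reg);		/* flush: write reaches HW first */
	msleep(10);				/* now the settle time is real */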
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..c6b71f656992 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index edd23386b47d..28e653e9c856 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -53,6 +53,8 @@
#include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
+extern struct list_head adapter_list;
+extern struct mutex uld_mutex;
enum {
MAX_NPORTS = 4, /* max # of ports */
@@ -338,12 +340,14 @@ struct adapter_params {
enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
+ unsigned char crypto; /* HW capability for crypto */
unsigned char bypass;
unsigned int ofldq_wr_cred;
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ unsigned int nsched_cls; /* number of traffic classes */
unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
unsigned int max_ird_adapter; /* Max read depth per adapter */
};
@@ -403,7 +407,6 @@ struct fw_info {
struct fw_hdr fw_hdr;
};
-
struct trace_params {
u32 data[TRACE_LEN / 4];
u32 mask[TRACE_LEN / 4];
@@ -434,11 +437,6 @@ enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
- MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
- MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
-
- /* # of streaming iSCSIT Rx queues */
- MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
};
enum {
@@ -455,8 +453,7 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
- MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
+ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
struct adapter;
@@ -493,6 +490,7 @@ struct port_info {
#endif /* CONFIG_CHELSIO_T4_FCOE */
bool rxtstamp; /* Enable TS */
struct hwtstamp_config tstamp_config;
+ struct sched_table *sched_tbl;
};
struct dentry;
@@ -510,6 +508,10 @@ enum { /* adapter flags */
FW_OFLD_CONN = (1 << 9),
};
+enum {
+ ULP_CRYPTO_LOOKASIDE = 1 << 0,
+};
+
struct rx_sw_desc;
struct sge_fl { /* SGE free-buffer queue state */
@@ -680,17 +682,24 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
u8 full; /* the Tx ring is full */
} ____cacheline_aligned_in_smp;
+struct sge_uld_rxq_info {
+ char name[IFNAMSIZ]; /* name of ULD driver */
+ struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
+ u16 *msix_tbl; /* msix_tbl for uld */
+ u16 *rspq_id; /* response queue id's of rxq */
+ u16 nrxq; /* # of ingress uld queues */
+ u16 nciq; /* # of completion queues */
+ u8 uld; /* uld type */
+};
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
- struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
- struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
- struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
- struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+ struct sge_uld_rxq_info **uld_rxq_info;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
@@ -698,14 +707,8 @@ struct sge {
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
- u16 iscsiqsets; /* # of active iSCSI queue sets */
- u16 niscsitq; /* # of available iSCST Rx queues */
- u16 rdmaqs; /* # of available RDMA Rx queues */
- u16 rdmaciqs; /* # of available RDMA concentrator IQs */
- u16 iscsi_rxq[MAX_OFLD_QSETS];
- u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
- u16 rdma_rxq[MAX_RDMA_QUEUES];
- u16 rdma_ciq[MAX_RDMA_CIQS];
+ u16 ofldqsets; /* # of active ofld queue sets */
+ u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -729,10 +732,7 @@ struct sge {
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
-#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
-#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
-#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
+#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
struct l2t_data;
@@ -757,6 +757,23 @@ struct hash_mac_addr {
u8 addr[ETH_ALEN];
};
+struct uld_msix_bmap {
+ unsigned long *msix_bmap;
+ unsigned int mapsize;
+ spinlock_t lock; /* lock for acquiring bitmap */
+};
+
+struct uld_msix_info {
+ unsigned short vec;
+ char desc[IFNAMSIZ + 10];
+ unsigned int idx;
+};
+
+struct vf_info {
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool pf_set_mac;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -767,6 +784,7 @@ struct adapter {
unsigned int mbox;
unsigned int pf;
unsigned int flags;
+ unsigned int adap_idx;
enum chip_type chip;
int msg_enable;
@@ -779,6 +797,9 @@ struct adapter {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
+ struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
+ struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
+ int msi_idx;
struct doorbell_stats db_stats;
struct sge sge;
@@ -786,6 +807,9 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ struct vf_info *vfinfo;
+ u8 num_vfs;
+
u32 filter_mode;
unsigned int l2t_start;
unsigned int l2t_end;
@@ -793,7 +817,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
+ unsigned int num_uld;
+ unsigned int num_ofld_uld;
struct list_head list_node;
struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
@@ -813,6 +840,8 @@ struct adapter {
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
+ struct mutex uld_mutex;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -822,6 +851,58 @@ struct adapter {
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
+
+ /* TC u32 offload */
+ struct cxgb4_tc_u32_table *tc_u32;
+};
+
+/* Support for "sched-class" command to allow a TX Scheduling Class to be
+ * programmed with various parameters.
+ */
+struct ch_sched_params {
+ s8 type; /* packet or flow */
+ union {
+ struct {
+ s8 level; /* scheduler hierarchy level */
+ s8 mode; /* per-class or per-flow */
+ s8 rateunit; /* bit or packet rate */
+ s8 ratemode; /* %port relative or kbps absolute */
+ s8 channel; /* scheduler channel [0..N] */
+ s8 class; /* scheduler class [0..N] */
+ s32 minrate; /* minimum rate */
+ s32 maxrate; /* maximum rate */
+ s16 weight; /* percent weight */
+ s16 pktsize; /* average packet size */
+ } params;
+ } u;
+};
+
+enum {
+ SCHED_CLASS_TYPE_PACKET = 0, /* class type */
+};
+
+enum {
+ SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */
+};
+
+enum {
+ SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
+};
+
+enum {
+ SCHED_CLASS_RATEUNIT_BITS = 0, /* bit rate scheduling */
+};
+
+enum {
+ SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
+};
+
+/* Support for "sched_queue" command to allow one or more NIC TX Queues
+ * to be bound to a TX Scheduling Class.
+ */
+struct ch_sched_queue {
+ s8 queue; /* queue index */
+ s8 class; /* class index */
};
/* Defined bit width of user definable filter tuples
@@ -947,11 +1028,47 @@ enum {
VLAN_REWRITE
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter. */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct filter_ctx *ctx; /* Caller's completion hook */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+ struct net_device *dev; /* Associated net device */
+ u32 tid; /* This will store the actual tid */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
}
+static inline int is_pci_uld(const struct adapter *adap)
+{
+ return adap->params.crypto;
+}
+
+static inline int is_uld(const struct adapter *adap)
+{
+ return (adap->params.offload || adap->params.crypto);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1178,6 +1295,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
@@ -1185,8 +1304,6 @@ int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi);
-int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
- unsigned int cnt);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;
@@ -1289,6 +1406,18 @@ static inline int hash_mac_addr(const u8 *addr)
return a & 0x3f;
}
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt);
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->adap = adap;
+ cxgb4_set_rspq_intr_params(q, us, cnt);
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1514,6 +1643,9 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+ int rateunit, int ratemode, int channel, int class,
+ int minrate, int maxrate, int weight, int pktsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
void t4_idma_monitor_init(struct adapter *adapter,
@@ -1521,4 +1653,11 @@ void t4_idma_monitor_init(struct adapter *adapter,
void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks);
+int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+ unsigned int naddr, u8 *addr);
+void t4_uld_mem_free(struct adapter *adap);
+int t4_uld_mem_alloc(struct adapter *adap);
+void t4_uld_clean_up(struct adapter *adap);
+void t4_register_netevent_notifier(void);
+void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
#endif /* __CXGB4_H__ */
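
To make the new sched-class structures concrete, a hypothetical fill for a 100 Mb/s absolute rate-limiter class and a queue binding (values chosen for illustration; the consumer lives in the new sched.c):

	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params = {
			.level    = SCHED_CLASS_LEVEL_CL_RL,
			.mode     = SCHED_CLASS_MODE_CLASS,
			.rateunit = SCHED_CLASS_RATEUNIT_BITS,
			.ratemode = SCHED_CLASS_RATEMODE_ABS,	/* kbps */
			.channel  = 0,
			.class    = 0,
			.minrate  = 0,
			.maxrate  = 100000,	/* 100 Mb/s in kbps */
			.weight   = 0,
			.pktsize  = 0,
		},
	};

	struct ch_sched_queue qe = {
		.queue = 0,	/* bind TX queue 0 ... */
		.class = 0,	/* ... to the class above */
	};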
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 91fb50850fff..20455d082cb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2432,17 +2432,11 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
- int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
- int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
- int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+ int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int iscsi_idx = r - eth_entries;
- int iscsit_idx = iscsi_idx - iscsi_entries;
- int rdma_idx = iscsit_idx - iscsit_entries;
- int ciq_idx = rdma_idx - rdma_entries;
- int ctrl_idx = ciq_idx - ciq_entries;
+ int ofld_idx = r - eth_entries;
+ int ctrl_idx = ofld_idx - ofld_entries;
int fq_idx = ctrl_idx - ctrl_entries;
if (r)
@@ -2518,119 +2512,17 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (iscsi_idx < iscsi_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsirxq[iscsi_idx * 4];
+ } else if (ofld_idx < ofld_entries) {
const struct sge_ofld_txq *tx =
- &adap->sge.ofldtxq[iscsi_idx * 4];
- int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
+ &adap->sge.ofldtxq[ofld_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
- S("QType:", "iSCSI");
+ S("QType:", "OFLD-Txq");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (iscsit_idx < iscsit_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsitrxq[iscsit_idx * 4];
- int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
-
- S("QType:", "iSCSIT");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (rdma_idx < rdma_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.rdmarxq[rdma_idx * 4];
- int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
-
- S("QType:", "RDMA-CPL");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (ciq_idx < ciq_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
- int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
-
- S("QType:", "RDMA-CIQ");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- RL("RxAN:", stats.an);
- RL("RxNoMem:", stats.nomem);
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
@@ -2672,10 +2564,7 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
- DIV_ROUND_UP(adap->sge.niscsitq, 4) +
- DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
- DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+ DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
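sge_qinfo_show() above walks one flat record index across the queue groups by subtracting each group's entry count in turn; the first group whose relative index is still below its own count owns the record. A standalone sketch of that chained-index idiom, with made-up group sizes:

#include <stdio.h>

int main(void)
{
	int eth_entries = 3, ofld_entries = 2, ctrl_entries = 1;
	int r = 4;				/* flat record index */
	int ofld_idx = r - eth_entries;		/* 1 */
	int ctrl_idx = ofld_idx - ofld_entries;	/* -1 */

	if (r < eth_entries)
		puts("eth");
	else if (ofld_idx < ofld_entries)
		puts("ofld");			/* selected for r == 4 */
	else if (ctrl_idx < ctrl_entries)
		puts("ctrl");
	return 0;
}
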
@@ -2859,12 +2748,6 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
-static int blocked_fl_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -2908,7 +2791,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
static const struct file_operations blocked_fl_fops = {
.owner = THIS_MODULE,
- .open = blocked_fl_open,
+ .open = simple_open,
.read = blocked_fl_read,
.write = blocked_fl_write,
.llseek = generic_file_llseek,
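The switch to simple_open() above drops a hand-rolled open handler whose only job was the file->private_data = inode->i_private hand-off that simple_open() already performs. A minimal sketch of a debugfs file wired up the same way; the demo_* names are hypothetical and not part of cxgb4:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/string.h>

static ssize_t demo_read(struct file *filp, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	/* simple_open() stashed inode->i_private here at open time */
	const char *msg = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, msg, strlen(msg));
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,		/* replaces a one-line custom open() */
	.read	= demo_read,
	.llseek	= generic_file_llseek,
};

/* Creation: debugfs_create_file("demo", 0444, parent, "hello\n", &demo_fops); */
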
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
new file mode 100644
index 000000000000..10736738ff30
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -0,0 +1,721 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "l2t.h"
+#include "t4fw_api.h"
+#include "cxgb4_filter.h"
+
+static inline bool is_field_set(u32 val, u32 mask)
+{
+ return val || mask;
+}
+
+static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
+{
+ return !(conf & conf_mask) && is_field_set(val, mask);
+}
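Together these two helpers encode the rule validate_filter() applies below: a match field counts as "set" when either its value or its mask is nonzero, and it is unsupported when the corresponding bit is absent from the card's compressed-filter configuration. A self-contained sketch of the same logic, with made-up config bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ETHERTYPE_F (1u << 0)	/* made-up config bits, not the */
#define DEMO_PROTOCOL_F  (1u << 1)	/* real T4 compressed-filter ids */

static bool is_field_set(uint32_t val, uint32_t mask)
{
	return val || mask;
}

static bool unsupported(uint32_t conf, uint32_t conf_mask,
			uint32_t val, uint32_t mask)
{
	return !(conf & conf_mask) && is_field_set(val, mask);
}

int main(void)
{
	uint32_t fconf = DEMO_ETHERTYPE_F;	/* card matches ethtype only */

	/* ethtype is configured on the card, so a set field passes: 0 */
	printf("%d\n", unsupported(fconf, DEMO_ETHERTYPE_F, 0x0800, 0xffff));
	/* protocol is not configured, so a set field is rejected: 1 */
	printf("%d\n", unsupported(fconf, DEMO_PROTOCOL_F, 6, 0xff));
	return 0;
}
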
+
+/* Validate filter spec against configuration done on the card. */
+static int validate_filter(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 fconf, iconf;
+
+ /* Check for unconfigured fields being used. */
+ fconf = adapter->params.tp.vlan_pri_map;
+ iconf = adapter->params.tp.ingress_config;
+
+ if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
+ unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
+ unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
+ unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
+ fs->mask.ethtype) ||
+ unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
+ unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
+ fs->mask.matchtype) ||
+ unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
+ unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
+ fs->mask.pfvf_vld) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
+ fs->mask.ovlan_vld) ||
+ unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
+ return -EOPNOTSUPP;
+
+ /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
+ * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
+	 * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
+ * below. Additionally, since the T4 firmware interface also
+ * carries that overlap, we need to translate any PF/VF
+ * specification into that internal format below.
+ */
+ if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+ return -EOPNOTSUPP;
+ if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
+ (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+ (iconf & VNIC_F)))
+ return -EOPNOTSUPP;
+ if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
+ return -ERANGE;
+ fs->mask.pf &= 0x7;
+ fs->mask.vf &= 0x7f;
+
+ /* If the user is requesting that the filter action loop
+	 * matching packets back out of one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* Don't allow various trivially obvious bogus out-of-range values... */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+	/* T4 doesn't support removing VLAN Tags for loopback filters. */
+ if (is_t4(adapter->params.chip) &&
+ fs->action == FILTER_SWITCH &&
+ (fs->newvlan == VLAN_REMOVE ||
+ fs->newvlan == VLAN_REWRITE))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int get_filter_steerq(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ int iq;
+
+ /* If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ if (fs->iq)
+ return -EINVAL;
+ iq = 0;
+ } else {
+ struct port_info *pi = netdev_priv(dev);
+
+		/* If the iq id is greater than or equal to the number of
+		 * qsets, then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->nqsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->ftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == PF_INET)
+ __set_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+ if (family == PF_INET)
+ __clear_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ spin_unlock_bh(&t->ftid_lock);
+}
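cxgb4_set_ftid() marks a single bitmap bit for an IPv4 filter but reserves a naturally aligned block of four bits (order 2, i.e. 2^2) for IPv6, matching the four-slot footprint enforced later in __cxgb4_set_filter(); bitmap_allocate_region() fails when any bit of the region is already taken. A simplified userspace model of the order-2 reservation:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of bitmap_allocate_region(): reserve a naturally
 * aligned block of 2^order bits, failing if any bit is already set.
 * Single 64-bit word; pos is assumed aligned to the block size, which
 * the driver guarantees for IPv6 filter ids.
 */
static int alloc_region(uint64_t *bmap, unsigned int pos, unsigned int order)
{
	uint64_t region = ((1ULL << (1u << order)) - 1) << pos;

	if (*bmap & region)
		return -1;	/* -EBUSY in the kernel helper */
	*bmap |= region;
	return 0;
}

int main(void)
{
	uint64_t ftid_bmap = 0;

	alloc_region(&ftid_bmap, 4, 2);		/* IPv6 filter: slots 4..7 */
	/* an IPv4 filter (order 0) at slot 5 now collides: prints -1 */
	printf("%d\n", alloc_region(&ftid_bmap, 5, 0));
	return 0;
}
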
+
+/* Delete the filter at a specified index. */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ len = sizeof(*fwr);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ }
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ htonl(FW_FILTER_WR_TID_V(f->tid) |
+ FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+ FW_FILTER_WR_NOREPLY_V(0) |
+ FW_FILTER_WR_IQ_V(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+ FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+ FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+ FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+ FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+ FW_FILTER_WR_PRIO_V(f->fs.prio) |
+ FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+ FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+ FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+ FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+ FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+ FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+ FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+ FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+ FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+ FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
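set_filter_wr() above and del_filter_wr() share one asynchronous protocol: flag the entry pending, ship the Work Request, and let the reply handler (filter_rpl() below) clear the flag and either mark the entry valid or tear it down. While pending, writable_filter() refuses further operations. A minimal model of that life cycle, independent of any hardware:

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the filter entry life cycle: idle -> pending -> valid.
 * writable() mirrors writable_filter(); reply() mirrors filter_rpl().
 */
struct entry {
	bool valid;
	bool pending;
	bool locked;
};

static int writable(const struct entry *f)
{
	if (f->locked)
		return -1;	/* -EPERM in the driver */
	if (f->pending)
		return -2;	/* -EBUSY: a firmware reply is outstanding */
	return 0;
}

static void send_wr(struct entry *f)
{
	f->pending = true;	/* block further ops until the reply */
}

static void reply(struct entry *f, bool added_ok)
{
	f->pending = false;
	f->valid = added_ok;	/* FW_FILTER_WR_FLT_ADDED vs. an error */
}

int main(void)
{
	struct entry f = { 0 };

	send_wr(&f);
	printf("%d\n", writable(&f));			/* -2: busy while pending */
	reply(&f, true);
	printf("%d %d\n", writable(&f), f.valid);	/* 0 1 */
	return 0;
}
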
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this, like the filter being locked or
+ * currently pending in another operation.
+ */
+int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+	/* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+	/* The zeroing of the filter rule below clears the filter's valid,
+	 * pending, and locked flags, the l2t pointer, etc., so it's all we
+	 * need for this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+void clear_all_filters(struct adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ unsigned int max_ftid = adapter->tids.nftids +
+ adapter->tids.nsftids;
+
+ for (i = 0; i < max_ftid; i++, f++)
+ if (f->valid || f->pending)
+ clear_filter(adapter, f);
+ }
+}
+
+/* Fill in default masks for the match fields whose values are set. */
+static void fill_default_mask(struct ch_filter_specification *fs)
+{
+ unsigned int lip = 0, lip_mask = 0;
+ unsigned int fip = 0, fip_mask = 0;
+ unsigned int i;
+
+ if (fs->val.iport && !fs->mask.iport)
+ fs->mask.iport |= ~0;
+ if (fs->val.fcoe && !fs->mask.fcoe)
+ fs->mask.fcoe |= ~0;
+ if (fs->val.matchtype && !fs->mask.matchtype)
+ fs->mask.matchtype |= ~0;
+ if (fs->val.macidx && !fs->mask.macidx)
+ fs->mask.macidx |= ~0;
+ if (fs->val.ethtype && !fs->mask.ethtype)
+ fs->mask.ethtype |= ~0;
+ if (fs->val.ivlan && !fs->mask.ivlan)
+ fs->mask.ivlan |= ~0;
+ if (fs->val.ovlan && !fs->mask.ovlan)
+ fs->mask.ovlan |= ~0;
+ if (fs->val.frag && !fs->mask.frag)
+ fs->mask.frag |= ~0;
+ if (fs->val.tos && !fs->mask.tos)
+ fs->mask.tos |= ~0;
+ if (fs->val.proto && !fs->mask.proto)
+ fs->mask.proto |= ~0;
+
+ for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
+ lip |= fs->val.lip[i];
+ lip_mask |= fs->mask.lip[i];
+ fip |= fs->val.fip[i];
+ fip_mask |= fs->mask.fip[i];
+ }
+
+ if (lip && !lip_mask)
+ memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
+
+ if (fip && !fip_mask)
+		memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));
+
+ if (fs->val.lport && !fs->mask.lport)
+ fs->mask.lport = ~0;
+ if (fs->val.fport && !fs->mask.fport)
+ fs->mask.fport = ~0;
+}
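The rule applied above is uniform: any match field the caller sets without an explicit mask gets an all-ones mask and becomes an exact-match criterion, while fields left at zero keep a zero mask and act as wildcards. Reduced to a single field:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lport = 80, lport_mask = 0;	/* value set, no mask given */

	if (lport && !lport_mask)
		lport_mask = ~0;	/* default to an exact match */
	printf("0x%x\n", lport_mask);	/* prints 0xffff */
	return 0;
}
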
+
+/* Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int max_fidx, fidx;
+ struct filter_entry *f;
+ u32 iconf;
+ int iq, ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ fill_default_mask(fs);
+
+ ret = validate_filter(dev, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+ if (iq < 0)
+ return iq;
+
+ /* IPv6 filters occupy four slots and must be aligned on
+ * four-slot boundaries. IPv4 filters only occupy a single
+	 * slot and have no alignment requirements, but writing a new
+	 * IPv4 filter into the middle of an existing IPv6 filter
+	 * requires clearing the old IPv6 filter, and hence we prevent
+ * insertion.
+ */
+ if (fs->type == 0) { /* IPv4 */
+		/* If our IPv4 filter isn't being written at an index
+		 * that is a multiple of four and there's an IPv6
+		 * filter at the four-aligned base slot, then we
+		 * prevent insertion.
+		 */
+ fidx = filter_id & ~0x3;
+ if (fidx != filter_id &&
+ adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
+ fidx, fidx + 3);
+ return -EINVAL;
+ }
+ }
+ } else { /* IPv6 */
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
+
+ /* Check all except the base overlapping IPv4 filter slots. */
+ for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EINVAL;
+ }
+ }
+ }
+
+	/* Check to make sure that the provided filter index is not
+	 * already in use by someone else.
+	 */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ if (ret)
+ return ret;
+
+ /* Check to make sure the filter requested is writable ... */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ return ret;
+ }
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adapter, f);
+
+ /* Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ iconf = adapter->params.tp.ingress_config;
+ if (iconf & VNIC_F) {
+ f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
+ f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
+ /* Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(adapter, filter_id);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ clear_filter(adapter, f);
+ }
+
+ return ret;
+}
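The slot checks above boil down to two pieces of index arithmetic: an IPv6 filter id must have its two low bits clear, and an IPv4 write must consult the entry at the four-aligned base slot in case an IPv6 filter occupies that block. In isolation:

#include <stdio.h>

int main(void)
{
	unsigned int filter_id = 6;
	unsigned int base = filter_id & ~0x3u;	/* four-aligned base slot: 4 */

	/* prints "base=4 aligned=0": id 6 is a legal IPv4 slot but an
	 * illegal IPv6 one; an IPv6 filter at slot 4 would cover 4..7,
	 * so the IPv4 write at 6 must first check the entry at the base.
	 */
	printf("base=%u aligned=%d\n", base, !(filter_id & 0x3));
	return 0;
}
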
+
+/* Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ struct filter_entry *f;
+ unsigned int max_fidx;
+ int ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ f->fs.type ? PF_INET6 : PF_INET);
+ return del_filter_wr(adapter, filter_id);
+ }
+
+ /* If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ complete(&ctx->completion);
+ }
+ return ret;
+}
+
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
+
+int cxgb4_del_filter(struct net_device *dev, int filter_id)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_del_filter(dev, filter_id, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
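cxgb4_set_filter() and cxgb4_del_filter() wrap the asynchronous __cxgb4_* entry points in an on-stack completion, giving in-kernel callers blocking semantics with a ten-second timeout. A hedged usage sketch; demo_install_drop_filter() is hypothetical, and the caller is assumed to supply a cxgb4 net_device and a free filter index:

#include <linux/in.h>	/* IPPROTO_TCP */

/* Hypothetical caller: drop all TCP traffic to local port 80. The field
 * names come from ch_filter_specification as used above; error handling
 * and index selection are left to the caller.
 */
static int demo_install_drop_filter(struct net_device *dev, int fidx)
{
	struct ch_filter_specification fs = { 0 };

	fs.val.proto  = IPPROTO_TCP;
	fs.mask.proto = ~0;		/* fill_default_mask() would do this too */
	fs.val.lport  = 80;		/* mask defaults to an exact match */
	fs.action     = FILTER_DROP;

	return cxgb4_set_filter(dev, fidx, &fs);	/* blocks for the FW reply */
}
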
+
+/* Handle a filter write/deletion reply. */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int tid = GET_TID(rpl);
+ struct filter_entry *f = NULL;
+ unsigned int max_fidx;
+ int idx;
+
+ max_fidx = adap->tids.nftids + adap->tids.nsftids;
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = TCB_COOKIE_G(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /* Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = 0;
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -ENOMEM;
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
+ }
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+ if (ctx)
+ complete(&ctx->completion);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
new file mode 100644
index 000000000000..23742cb1c69f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FILTER_H
+#define __CXGB4_FILTER_H
+
+#include "t4_msg.h"
+
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct adapter *adap, struct filter_entry *f);
+
+int set_filter_wr(struct adapter *adapter, int fidx);
+int delete_filter(struct adapter *adapter, unsigned int fidx);
+
+int writable_filter(struct filter_entry *f);
+void clear_all_filters(struct adapter *adapter);
+#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3ceafb55d6da..cf147ca419a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +67,7 @@
#include <linux/crash_dump.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
@@ -76,6 +77,8 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "sched.h"
+#include "cxgb4_tc_u32.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -86,30 +89,6 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
-/* Host shadow copy of ingress filter entry. This is in host native format
- * and doesn't match the ordering or bit order, etc. of the hardware of the
- * firmware command. The use of bit-field structure elements is purely to
- * remind ourselves of the field size limitations and save memory in the case
- * where the filter table is large.
- */
-struct filter_entry {
- /* Administrative fields for filter.
- */
- u32 valid:1; /* filter allocated and valid */
- u32 locked:1; /* filter is administratively locked */
-
- u32 pending:1; /* filter action is pending firmware reply */
- u32 smtidx:8; /* Source MAC Table index for smac */
- struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
-
- /* The filter itself. Most of this is a straight copy of information
- * provided by the extended ioctl(). Some fields are translated to
- * internal forms -- for instance the Ingress Queue ID passed in from
- * the ioctl() is translated into the Absolute Ingress Queue ID.
- */
- struct ch_filter_specification fs;
-};
-
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -223,13 +202,8 @@ MODULE_PARM_DESC(select_queue,
static struct dentry *cxgb4_debugfs_root;
-static LIST_HEAD(adapter_list);
-static DEFINE_MUTEX(uld_mutex);
-/* Adapter list to be accessed from atomic context */
-static LIST_HEAD(adap_rcu_list);
-static DEFINE_SPINLOCK(adap_rcu_lock);
-static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
+LIST_HEAD(adapter_list);
+DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
@@ -303,11 +277,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
txq->dcb_prio = value;
}
}
-#endif /* CONFIG_CHELSIO_T4_DCB */
-int cxgb4_dcb_enabled(const struct net_device *dev)
+static int cxgb4_dcb_enabled(const struct net_device *dev)
{
-#ifdef CONFIG_CHELSIO_T4_DCB
struct port_info *pi = netdev_priv(dev);
if (!pi->dcb.enabled)
@@ -315,11 +287,8 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
(pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
- return 0;
-#endif
}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
+#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
@@ -531,66 +500,6 @@ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
-/* Clear a filter and release any of its resources that we own. This also
- * clears the filter's "pending" status.
- */
-static void clear_filter(struct adapter *adap, struct filter_entry *f)
-{
- /* If the new or old filter have loopback rewriteing rules then we'll
- * need to free any existing Layer Two Table (L2T) entries of the old
- * filter rule. The firmware will handle freeing up any Source MAC
- * Table (SMT) entries used for rewriting Source MAC Addresses in
- * loopback rules.
- */
- if (f->l2t)
- cxgb4_l2t_release(f->l2t);
-
- /* The zeroing of the filter rule below clears the filter valid,
- * pending, locked flags, l2t pointer, etc. so it's all we need for
- * this operation.
- */
- memset(f, 0, sizeof(*f));
-}
-
-/* Handle a filter write/deletion reply.
- */
-static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
-{
- unsigned int idx = GET_TID(rpl);
- unsigned int nidx = idx - adap->tids.ftid_base;
- unsigned int ret;
- struct filter_entry *f;
-
- if (idx >= adap->tids.ftid_base && nidx <
- (adap->tids.nftids + adap->tids.nsftids)) {
- idx = nidx;
- ret = TCB_COOKIE_G(rpl->cookie);
- f = &adap->tids.ftid_tab[idx];
-
- if (ret == FW_FILTER_WR_FLT_DELETED) {
- /* Clear the filter when we get confirmation from the
- * hardware that the filter has been deleted.
- */
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
- dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
- idx);
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_FLT_ADDED) {
- f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
- f->pending = 0; /* asynchronous setup completed */
- f->valid = 1;
- } else {
- /* Something went wrong. Issue a warning about the
- * problem and clear everything out.
- */
- dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
- idx, ret);
- clear_filter(adap, f);
- }
- }
-}
-
/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
@@ -677,56 +586,6 @@ out:
return 0;
}
-/* Flush the aggregated lro sessions */
-static void uldrx_flush_handler(struct sge_rspq *q)
-{
- if (ulds[q->uld].lro_flush)
- ulds[q->uld].lro_flush(&q->lro_mgr);
-}
-
-/**
- * uldrx_handler - response queue handler for ULD queues
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the offload message
- * @gl: the gather list of packet fragments
- *
- * Deliver an ingress offload packet to a ULD. All processing is done by
- * the ULD, we just maintain statistics.
- */
-static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *gl)
-{
- struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
- int ret;
-
- /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
- */
- if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
- ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
- rsp += 2;
-
- if (q->flush_handler)
- ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl, &q->lro_mgr,
- &q->napi);
- else
- ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl);
-
- if (ret) {
- rxq->stats.nomem++;
- return -1;
- }
-
- if (gl == NULL)
- rxq->stats.imm++;
- else if (gl == CXGB4_MSG_AN)
- rxq->stats.an++;
- else
- rxq->stats.pkts++;
- return 0;
-}
-
static void disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
@@ -778,30 +637,12 @@ static void name_msix_vecs(struct adapter *adap)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
}
-
- /* offload queues */
- for_each_iscsirxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
- adap->port[0]->name, i);
-
- for_each_iscsitrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
- adap->port[0]->name, i);
-
- for_each_rdmarxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
- adap->port[0]->name, i);
-
- for_each_rdmaciq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
- adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
- int iscsitqidx = 0;
+ int err, ethqidx;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -818,57 +659,9 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_iscsirxq(s, iscsiqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsirxq[iscsiqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_iscsitrxq(s, iscsitqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsitrxq[iscsitqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmarxq[rdmaqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmaciq(s, rdmaciqqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmaciq[rdmaciqqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
return 0;
unwind:
- while (--rdmaciqqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmaciq[rdmaciqqidx].rspq);
- while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmarxq[rdmaqidx].rspq);
- while (--iscsitqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsitrxq[iscsitqidx].rspq);
- while (--iscsiqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -884,16 +677,6 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_iscsirxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsirxq[i].rspq);
- for_each_iscsitrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsitrxq[i].rspq);
- for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
- for_each_rdmaciq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1032,28 +815,30 @@ static void enable_rx(struct adapter *adap)
}
}
-static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
- unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids, bool lro)
+
+static int setup_fw_sge_queues(struct adapter *adap)
{
- int i, err;
+ struct sge *s = &adap->sge;
+ int err = 0;
+
+ bitmap_zero(s->starving_fl, s->egr_sz);
+ bitmap_zero(s->txq_maperr, s->egr_sz);
- for (i = 0; i < nq; i++, q++) {
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler,
- lro ? uldrx_flush_handler : NULL,
- 0);
+ if (adap->flags & USING_MSIX)
+ adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
+ else {
+ err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+ NULL, NULL, NULL, -1);
if (err)
return err;
- memset(&q->stats, 0, sizeof(q->stats));
- if (ids)
- ids[i] = q->rspq.abs_id;
+ adap->msi_idx = -((int)s->intrq.abs_id + 1);
}
- return 0;
+
+ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+ adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+ if (err)
+ t4_free_sge_resources(adap);
+ return err;
}
/**
@@ -1066,41 +851,10 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
*/
static int setup_sge_queues(struct adapter *adap)
{
- int err, msi_idx, i, j;
+ int err, i, j;
struct sge *s = &adap->sge;
-
- bitmap_zero(s->starving_fl, s->egr_sz);
- bitmap_zero(s->txq_maperr, s->egr_sz);
-
- if (adap->flags & USING_MSIX)
- msi_idx = 1; /* vector 0 is for non-queue interrupts */
- else {
- err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL, NULL, -1);
- if (err)
- return err;
- msi_idx = -((int)s->intrq.abs_id + 1);
- }
-
- /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
- * don't forget to update the following which need to be
- * synchronized to and changes here.
- *
- * 1. The calculations of MAX_INGQ in cxgb4.h.
- *
- * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
- * to accommodate any new/deleted Ingress Queues
- * which need MSI-X Vectors.
- *
- * 3. Update sge_qinfo_show() to include information on the
- * new/deleted queues.
- */
- err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler, NULL, -1);
- if (err) {
-freeout: t4_free_sge_resources(adap);
- return err;
- }
+ struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ unsigned int cmplqid = 0;
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -1109,10 +863,10 @@ freeout: t4_free_sge_resources(adap);
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
- if (msi_idx > 0)
- msi_idx++;
+ if (adap->msi_idx > 0)
+ adap->msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
- msi_idx, &q->fl,
+ adap->msi_idx, &q->fl,
t4_ethrx_handler,
NULL,
t4_get_mps_bg_map(adap,
@@ -1131,8 +885,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
- for_each_iscsirxq(s, i) {
+ j = s->ofldqsets / adap->params.nports; /* iscsi queues per channel */
+ for_each_ofldtxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1140,30 +894,15 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
- if (err) \
- goto freeout; \
- if (msi_idx > 0) \
- msi_idx += nq; \
-} while (0)
-
- ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
- ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
- j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
-
-#undef ALLOC_OFLD_RXQS
-
for_each_port(adap, i) {
- /*
- * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ /* Note that cmplqid below is 0 if we don't
* have RDMA queues, and that's the right value.
*/
+ if (rxq_info)
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+
err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
- s->fw_evtq.cntxt_id,
- s->rdmarxq[i].rspq.cntxt_id);
+ s->fw_evtq.cntxt_id, cmplqid);
if (err)
goto freeout;
}
@@ -1174,6 +913,9 @@ freeout: t4_free_sge_resources(adap);
RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
+freeout:
+ t4_free_sge_resources(adap);
+ return err;
}
/*
@@ -1197,151 +939,6 @@ void t4_free_mem(void *addr)
kvfree(addr);
}
-/* Send a Work Request to write the filter at a specified index. We construct
- * a Firmware Filter Work Request to have the work done and put the indicated
- * filter into "pending" mode which will prevent any further actions against
- * it till we get a reply from the firmware on the completion status of the
- * request.
- */
-static int set_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int ftid;
-
- skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- /* If the new filter requires loopback Destination MAC and/or VLAN
- * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
- * the filter.
- */
- if (f->fs.newdmac || f->fs.newvlan) {
- /* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
- f->fs.eport, f->fs.dmac);
- if (f->l2t == NULL) {
- kfree_skb(skb);
- return -ENOMEM;
- }
- }
-
- ftid = adapter->tids.ftid_base + fidx;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
-
- /* It would be nice to put most of the following in t4_hw.c but most
- * of the work is translating the cxgbtool ch_filter_specification
- * into the Work Request and the definition of that structure is
- * currently in cxgbtool.h which isn't appropriate to pull into the
- * common code. We may eventually try to come up with a more neutral
- * filter specification structure but for now it's easiest to simply
- * put this fairly direct code in line ...
- */
- fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
- fwr->tid_to_iq =
- htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_RQTYPE_V(f->fs.type) |
- FW_FILTER_WR_NOREPLY_V(0) |
- FW_FILTER_WR_IQ_V(f->fs.iq));
- fwr->del_filter_to_l2tix =
- htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
- FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
- FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
- FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
- FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
- FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
- FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
- FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
- FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
- FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
- FW_FILTER_WR_PRIO_V(f->fs.prio) |
- FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
- fwr->ethtype = htons(f->fs.val.ethtype);
- fwr->ethtypem = htons(f->fs.mask.ethtype);
- fwr->frag_to_ovlan_vldm =
- (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
- FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
- FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
- FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
- fwr->rx_chan_rx_rpl_iq =
- htons(FW_FILTER_WR_RX_CHAN_V(0) |
- FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
- fwr->maci_to_matchtypem =
- htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
- FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
- FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
- FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
- FW_FILTER_WR_PORT_V(f->fs.val.iport) |
- FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
- FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
- FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
- fwr->ptcl = f->fs.val.proto;
- fwr->ptclm = f->fs.mask.proto;
- fwr->ttyp = f->fs.val.tos;
- fwr->ttypm = f->fs.mask.tos;
- fwr->ivlan = htons(f->fs.val.ivlan);
- fwr->ivlanm = htons(f->fs.mask.ivlan);
- fwr->ovlan = htons(f->fs.val.ovlan);
- fwr->ovlanm = htons(f->fs.mask.ovlan);
- memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
- memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
- memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
- memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
- fwr->lp = htons(f->fs.val.lport);
- fwr->lpm = htons(f->fs.mask.lport);
- fwr->fp = htons(f->fs.val.fport);
- fwr->fpm = htons(f->fs.mask.fport);
- if (f->fs.newsmac)
- memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
- t4_ofld_send(adapter, skb);
- return 0;
-}
-
-/* Delete the filter at a specified index.
- */
-static int del_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int len, ftid;
-
- len = sizeof(*fwr);
- ftid = adapter->tids.ftid_base + fidx;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
- t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- t4_mgmt_tx(adapter, skb);
- return 0;
-}
-
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -1723,19 +1320,22 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
*/
static int tid_init(struct tid_info *t)
{
- size_t size;
- unsigned int stid_bmap_size;
- unsigned int natids = t->natids;
struct adapter *adap = container_of(t, struct adapter, tids);
+ unsigned int max_ftids = t->nftids + t->nsftids;
+ unsigned int natids = t->natids;
+ unsigned int stid_bmap_size;
+ unsigned int ftid_bmap_size;
+ size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ ftid_bmap_size = BITS_TO_LONGS(t->nftids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
- t->nftids * sizeof(*t->ftid_tab) +
- t->nsftids * sizeof(*t->ftid_tab);
+ max_ftids * sizeof(*t->ftid_tab) +
+ ftid_bmap_size * sizeof(long);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
@@ -1745,8 +1345,10 @@ static int tid_init(struct tid_info *t)
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
+ spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
t->sftids_in_use = 0;
@@ -1761,12 +1363,16 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
- /* Reserve stid 0 for T4/T5 adapters */
- if (!t->stid_base &&
- (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
- __set_bit(0, t->stid_bmap);
+ if (is_offload(adap)) {
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ __set_bit(0, t->stid_bmap);
+ }
+
+ bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
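tid_init() sizes a single allocation for all of the tid/atid/stid/ftid tables plus both bitmaps and then carves it up with pointer arithmetic, so a single t4_free_mem() releases everything. The same pattern in miniature, as a standalone sketch:

#include <stdio.h>
#include <stdlib.h>

/* One allocation carved into an array of entries followed by its bitmap,
 * mirroring how ftid_tab and ftid_bmap share a single t4_alloc_mem().
 */
struct entry {
	int tid;
};

int main(void)
{
	unsigned int nentries = 64;
	unsigned int bmap_words = (nentries + 63) / 64;
	size_t size = nentries * sizeof(struct entry) +
		      bmap_words * sizeof(unsigned long);
	struct entry *tab = calloc(1, size);
	unsigned long *bmap;

	if (!tab)
		return 1;
	bmap = (unsigned long *)&tab[nentries];	/* bitmap lives past the table */
	bmap[0] |= 1UL;				/* reserve entry 0, like stid 0 */
	printf("%lu\n", bmap[0]);
	free(tab);				/* one free releases both */
	return 0;
}
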
@@ -2316,7 +1922,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2328,7 +1934,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2336,9 +1942,10 @@ static void enable_dbs(struct adapter *adap)
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
+ enum cxgb4_uld type = CXGB4_ULD_RDMA;
+
+ if (adap->uld && adap->uld[type].handle)
+ adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
@@ -2392,13 +1999,14 @@ out:
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
+
static void recover_all_queues(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2463,94 +2071,12 @@ void t4_db_dropped(struct adapter *adap)
queue_work(adap->workq, &adap->db_drop_task);
}
-static void uld_attach(struct adapter *adap, unsigned int uld)
-{
- void *handle;
- struct cxgb4_lld_info lli;
- unsigned short i;
-
- lli.pdev = adap->pdev;
- lli.pf = adap->pf;
- lli.l2t = adap->l2t;
- lli.tids = &adap->tids;
- lli.ports = adap->port;
- lli.vr = &adap->vres;
- lli.mtus = adap->params.mtus;
- if (uld == CXGB4_ULD_RDMA) {
- lli.rxq_ids = adap->sge.rdma_rxq;
- lli.ciq_ids = adap->sge.rdma_ciq;
- lli.nrxq = adap->sge.rdmaqs;
- lli.nciq = adap->sge.rdmaciqs;
- } else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.iscsi_rxq;
- lli.nrxq = adap->sge.iscsiqsets;
- } else if (uld == CXGB4_ULD_ISCSIT) {
- lli.rxq_ids = adap->sge.iscsit_rxq;
- lli.nrxq = adap->sge.niscsitq;
- }
- lli.ntxq = adap->sge.iscsiqsets;
- lli.nchan = adap->params.nports;
- lli.nports = adap->params.nports;
- lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
- lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
- lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
- lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
- lli.iscsi_ppm = &adap->iscsi_ppm;
- lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << adap->params.sge.eq_qpp;
- lli.ucq_density = 1 << adap->params.sge.iq_qpp;
- lli.filt_mode = adap->params.tp.vlan_pri_map;
- /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
- for (i = 0; i < NCHAN; i++)
- lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
- lli.fw_vers = adap->params.fw_vers;
- lli.dbfifo_int_thresh = dbfifo_int_thresh;
- lli.sge_ingpadboundary = adap->sge.fl_align;
- lli.sge_egrstatuspagesize = adap->sge.stat_len;
- lli.sge_pktshift = adap->sge.pktshift;
- lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
- lli.max_ordird_qp = adap->params.max_ordird_qp;
- lli.max_ird_adapter = adap->params.max_ird_adapter;
- lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
- lli.nodeid = dev_to_node(adap->pdev_dev);
-
- handle = ulds[uld].add(&lli);
- if (IS_ERR(handle)) {
- dev_warn(adap->pdev_dev,
- "could not attach to the %s driver, error %ld\n",
- uld_str[uld], PTR_ERR(handle));
- return;
- }
-
- adap->uld_handle[uld] = handle;
-
+void t4_register_netevent_notifier(void)
+{
if (!netevent_registered) {
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
-
- if (adap->flags & FULL_INIT_DONE)
- ulds[uld].state_change(handle, CXGB4_STATE_UP);
-}
-
-static void attach_ulds(struct adapter *adap)
-{
- unsigned int i;
-
- spin_lock(&adap_rcu_lock);
- list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
- spin_unlock(&adap_rcu_lock);
-
- mutex_lock(&uld_mutex);
- list_add_tail(&adap->list_node, &adapter_list);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (ulds[i].add)
- uld_attach(adap, i);
- mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
@@ -2560,20 +2086,16 @@ static void detach_ulds(struct adapter *adap)
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i]) {
- ulds[i].state_change(adap->uld_handle[i],
+ if (adap->uld && adap->uld[i].handle) {
+ adap->uld[i].state_change(adap->uld[i].handle,
CXGB4_STATE_DETACH);
- adap->uld_handle[i] = NULL;
+ adap->uld[i].handle = NULL;
}
if (netevent_registered && list_empty(&adapter_list)) {
unregister_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
-
- spin_lock(&adap_rcu_lock);
- list_del_rcu(&adap->rcu_node);
- spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -2582,60 +2104,11 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i])
- ulds[i].state_change(adap->uld_handle[i], new_state);
- mutex_unlock(&uld_mutex);
-}
-
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
- *
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
- */
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
-{
- int ret = 0;
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- if (ulds[type].add) {
- ret = -EBUSY;
- goto out;
- }
- ulds[type] = *p;
- list_for_each_entry(adap, &adapter_list, list_node)
- uld_attach(adap, type);
-out: mutex_unlock(&uld_mutex);
- return ret;
-}
-EXPORT_SYMBOL(cxgb4_register_uld);
-
-/**
- * cxgb4_unregister_uld - unregister an upper-layer driver
- * @type: the ULD type
- *
- * Unregisters an existing upper-layer driver.
- */
-int cxgb4_unregister_uld(enum cxgb4_uld type)
-{
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node)
- adap->uld_handle[type] = NULL;
- ulds[type].add = NULL;
+ if (adap->uld && adap->uld[i].handle)
+ adap->uld[i].state_change(adap->uld[i].handle,
+ new_state);
mutex_unlock(&uld_mutex);
- return 0;
}
-EXPORT_SYMBOL(cxgb4_unregister_uld);
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
@@ -2741,7 +2214,6 @@ static int cxgb_up(struct adapter *adap)
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
-
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
@@ -2819,40 +2291,6 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
-/* Return an error number if the indicated filter isn't writable ...
- */
-static int writable_filter(struct filter_entry *f)
-{
- if (f->locked)
- return -EPERM;
- if (f->pending)
- return -EBUSY;
-
- return 0;
-}
-
-/* Delete the filter at the specified index (if valid). The checks for all
- * the common problems with doing this like the filter being locked, currently
- * pending in another operation, etc.
- */
-static int delete_filter(struct adapter *adapter, unsigned int fidx)
-{
- struct filter_entry *f;
- int ret;
-
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
- return -EINVAL;
-
- f = &adapter->tids.ftid_tab[fidx];
- ret = writable_filter(f);
- if (ret)
- return ret;
- if (f->valid)
- return del_filter_wr(adapter, fidx);
-
- return 0;
-}
-
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned char port, unsigned char mask)
@@ -2922,7 +2360,6 @@ EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6)
{
- int ret;
struct filter_entry *f;
struct adapter *adap;
@@ -2936,11 +2373,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
/* Unlock the filter */
f->locked = 0;
- ret = delete_filter(adap, stid);
- if (ret)
- return ret;
-
- return 0;
+ return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
@@ -3078,6 +2511,85 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
+#ifdef CONFIG_PCI_IOV
+static int dummy_open(struct net_device *dev)
+{
+ /* Turn carrier off since we don't have to transmit anything on this
+ * interface.
+ */
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/* Fill the MAC addresses that the firmware will assign to the VFs */
+static void fill_vf_station_mac_addr(struct adapter *adap)
+{
+ unsigned int i;
+ u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ int err;
+ u8 *na;
+ u16 a, b;
+
+ err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
+ if (!err) {
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
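+		/* Derive a locally administered unicast base MAC: fold the
+		 * first three bytes of the VPD network address into a 16-bit
+		 * prefix, force the locally-assigned bit, clear the multicast
+		 * bit, keep bytes 3..5 of the hardware address, and encode
+		 * the PF and the VF index in the final byte below.
+		 */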
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (i = 0; i < adap->num_vfs; i++) {
+ macaddr[5] = adap->pf * 16 + i;
+ ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
+ }
+ }
+}
+
+static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+
+ /* verify MAC addr is valid */
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(pi->adapter->pdev_dev,
+ "Invalid Ethernet address %pM for VF %d\n",
+ mac, vf);
+ return -EINVAL;
+ }
+
+ dev_info(pi->adapter->pdev_dev,
+ "Setting MAC %pM on VF %d\n", mac, vf);
+ ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
+ if (!ret)
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
+ return ret;
+}
+
+static int cxgb_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
+ return 0;
+}
+#endif
+
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
int ret;
@@ -3114,6 +2626,116 @@ static void cxgb_netpoll(struct net_device *dev)
}
#endif
+static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ struct ch_sched_params p;
+ struct ch_sched_queue qe;
+ u32 req_rate;
+ int err = 0;
+
+ if (!can_sched(dev))
+		return -EOPNOTSUPP;
+
+ if (index < 0 || index > pi->nqsets - 1)
+ return -EINVAL;
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to rate limit on queue %d. Link Down?\n",
+ index);
+ return -EINVAL;
+ }
+
+	/* Convert from Mbps to Kbps; rate << 10 approximates rate * 1000 */
+ req_rate = rate << 10;
+
+ /* Max rate is 10 Gbps */
+ if (req_rate >= SCHED_MAX_RATE_KBPS) {
+ dev_err(adap->pdev_dev,
+ "Invalid rate %u Mbps, Max rate is %u Gbps\n",
+ rate, SCHED_MAX_RATE_KBPS);
+ return -ERANGE;
+ }
+
+ /* First unbind the queue from any existing class */
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = index;
+ qe.class = SCHED_CLS_NONE;
+
+ err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
+ if (err) {
+ dev_err(adap->pdev_dev,
+ "Unbinding Queue %d on port %d fail. Err: %d\n",
+ index, pi->port_id, err);
+ return err;
+ }
+
+ /* Queue already unbound */
+ if (!req_rate)
+ return 0;
+
+ /* Fetch any available unused or matching scheduling class */
+ memset(&p, 0, sizeof(p));
+ p.type = SCHED_CLASS_TYPE_PACKET;
+ p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
+ p.u.params.mode = SCHED_CLASS_MODE_CLASS;
+ p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
+ p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
+ p.u.params.channel = pi->tx_chan;
+ p.u.params.class = SCHED_CLS_NONE;
+ p.u.params.minrate = 0;
+ p.u.params.maxrate = req_rate;
+ p.u.params.weight = 0;
+ p.u.params.pktsize = dev->mtu;
+
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e)
+ return -ENOMEM;
+
+ /* Bind the queue to a scheduling class */
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = index;
+ qe.class = e->idx;
+
+ err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
+ if (err)
+ dev_err(adap->pdev_dev,
+ "Queue rate limiting failed. Err: %d\n", err);
+ return err;
+}
+
+static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
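+	/* Only u32 classifier rules attached to the ingress qdisc are
+	 * offloaded; everything else falls through to the software
+	 * datapath via -EOPNOTSUPP.
+	 */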
+ if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+ tc->type == TC_SETUP_CLSU32) {
+ switch (tc->cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return cxgb4_config_knode(dev, proto, tc->cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return cxgb4_delete_knode(dev, proto, tc->cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -3136,7 +2758,31 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = cxgb_busy_poll,
#endif
+ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
+ .ndo_setup_tc = cxgb_setup_tc,
+};
+
+#ifdef CONFIG_PCI_IOV
+static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
+ .ndo_open = dummy_open,
+ .ndo_set_vf_mac = cxgb_set_vf_mac,
+ .ndo_get_vf_config = cxgb_get_vf_config,
+};
+#endif
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strlcpy(info->version, cxgb4_driver_version,
+ sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+}
+static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
+ .get_drvinfo = get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -3979,6 +3625,12 @@ static int adap_init0(struct adapter *adap)
adap->clipt_start = val[0];
adap->clipt_end = val[1];
+	/* We don't yet have a PARAMs call to retrieve the number of Traffic
+	 * Classes supported by the hardware/firmware, so we hard-code it here
+ * for now.
+ */
+ adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
@@ -4067,6 +3719,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ofldq_wr_cred = val[5];
adap->params.offload = 1;
+ adap->num_ofld_uld += 1;
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -4119,6 +3772,7 @@ static int adap_init0(struct adapter *adap)
"max_ordird_qp %d max_ird_adapter %d\n",
adap->params.max_ordird_qp,
adap->params.max_ird_adapter);
+ adap->num_ofld_uld += 2;
}
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -4129,6 +3783,13 @@ static int adap_init0(struct adapter *adap)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
+		/* LIO target and cxgb4i initiator */
+ adap->num_ofld_uld += 2;
+ }
+ if (caps_cmd.cryptocaps) {
+ /* Should query params here...TODO */
+ adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
@@ -4318,16 +3979,6 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
- unsigned int us, unsigned int cnt,
- unsigned int size, unsigned int iqe_size)
-{
- q->adap = adap;
- cxgb4_set_rspq_intr_params(q, us, cnt);
- q->iqe_len = iqe_size;
- q->size = size;
-}
-
/*
* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
@@ -4340,12 +3991,16 @@ static void cfg_queues(struct adapter *adap)
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- int ciq_size;
/* Reduce memory usage in kdump environment, disable all offload.
*/
- if (is_kdump_kernel())
+ if (is_kdump_kernel()) {
adap->params.offload = 0;
+ adap->params.crypto = 0;
+ } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ }
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -4389,33 +4044,18 @@ static void cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
- if (is_offload(adap)) {
+ if (is_uld(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->iscsirxq),
- num_online_cpus());
- s->iscsiqsets = roundup(i, adap->params.nports);
- } else
- s->iscsiqsets = adap->params.nports;
- /* For RDMA one Rx queue per channel suffices */
- s->rdmaqs = adap->params.nports;
- /* Try and allow at least 1 CIQ per cpu rounding down
- * to the number of ports, with a minimum of 1 per port.
- * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
- * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
- * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
- */
- s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
- s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
- adap->params.nports;
- s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
-
- if (!is_t4(adap->params.chip))
- s->niscsitq = s->iscsiqsets;
+ i = num_online_cpus();
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else {
+ s->ofldqsets = adap->params.nports;
+ }
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4434,47 +4074,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsirxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSI;
- r->fl.size = 72;
- }
-
- if (!is_t4(adap->params.chip)) {
- for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsitrxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSIT;
- r->fl.size = 72;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
- struct sge_ofld_rxq *r = &s->rdmarxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 511, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- r->fl.size = 72;
- }
-
- ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
- if (ciq_size > SGE_MAX_IQ_SIZE) {
- CH_WARN(adap, "CIQ size too small for available IQs\n");
- ciq_size = SGE_MAX_IQ_SIZE;
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
- struct sge_ofld_rxq *r = &s->rdmaciq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- }
-
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
- init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
/*
@@ -4505,42 +4106,90 @@ static void reduce_ethqs(struct adapter *adap, int n)
}
}
+static int get_msix_info(struct adapter *adap)
+{
+ struct uld_msix_info *msix_info;
+ unsigned int max_ingq = 0;
+
+ if (is_offload(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
+ if (is_pci_uld(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_uld;
+
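+	/* One MSI-X slot is reserved for every potential offload queue of
+	 * every ULD type; the bitmap allocated below tracks which of those
+	 * slots are currently handed out.
+	 */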
+ if (!max_ingq)
+ goto out;
+
+ msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
+ if (!msix_info)
+ return -ENOMEM;
+
+ adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->msix_bmap_ulds.msix_bmap) {
+ kfree(msix_info);
+ return -ENOMEM;
+ }
+ spin_lock_init(&adap->msix_bmap_ulds.lock);
+ adap->msix_info_ulds = msix_info;
+out:
+ return 0;
+}
+
+static void free_msix_info(struct adapter *adap)
+{
+	/* Nothing was allocated if no ULD class reserved vectors */
+	if (!(adap->num_uld || adap->num_ofld_uld))
+ return;
+
+ kfree(adap->msix_info_ulds);
+ kfree(adap->msix_bmap_ulds.msix_bmap);
+}
+
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
static int enable_msix(struct adapter *adap)
{
- int ofld_need = 0;
- int i, want, need, allocated;
+ int ofld_need = 0, uld_need = 0;
+ int i, j, want, need, allocated;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry *entries;
+ int max_ingq = MAX_INGQ;
- entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+ if (is_pci_uld(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
+ if (is_offload(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
+ entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- for (i = 0; i < MAX_INGQ + 1; ++i)
+ /* map for msix */
+ if (get_msix_info(adap)) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ }
+
+ for (i = 0; i < max_ingq + 1; ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
- s->niscsitq;
- /* need nchan for each possible ULD */
- if (is_t4(adap->params.chip))
- ofld_need = 3 * nchan;
- else
- ofld_need = 4 * nchan;
+ want += adap->num_ofld_uld * s->ofldqsets;
+ ofld_need = adap->num_ofld_uld * nchan;
+ }
+ if (is_pci_uld(adap)) {
+ want += adap->num_uld * s->ofldqsets;
+ uld_need = adap->num_uld * nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port.
*/
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
+ need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
- need = adap->params.nports + EXTRA_VECS + ofld_need;
+ need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) {
@@ -4554,33 +4203,31 @@ static int enable_msix(struct adapter *adap)
* Every group gets its minimum requirement and NIC gets top
* priority for leftovers.
*/
- i = allocated - EXTRA_VECS - ofld_need;
+ i = allocated - EXTRA_VECS - ofld_need - uld_need;
if (i < s->max_ethqsets) {
s->max_ethqsets = i;
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
- if (is_offload(adap)) {
- if (allocated < want) {
- s->rdmaqs = nchan;
- s->rdmaciqs = nchan;
+ if (is_uld(adap)) {
+ if (allocated < want)
+ s->nqs_per_uld = nchan;
+ else
+ s->nqs_per_uld = s->ofldqsets;
+ }
- if (!is_t4(adap->params.chip))
- s->niscsitq = nchan;
+ for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ if (is_uld(adap)) {
+		for (j = 0; i < allocated; ++i, j++) {
+ adap->msix_info_ulds[j].vec = entries[i].vector;
+ adap->msix_info_ulds[j].idx = i;
}
-
- /* leftovers go to OFLD */
- i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs - s->niscsitq;
- s->iscsiqsets = (i / nchan) * nchan; /* round down */
-
+ adap->msix_bmap_ulds.mapsize = j;
}
- for (i = 0; i < allocated; ++i)
- adap->msix_info[i].vec = entries[i].vector;
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
- allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
- s->rdmaciqs);
+ "nic %d per uld %d\n",
+ allocated, s->max_ethqsets, s->nqs_per_uld);
kfree(entries);
return 0;
@@ -4794,7 +4441,9 @@ static void free_some_resources(struct adapter *adapter)
unsigned int i;
t4_free_mem(adapter->l2t);
+ t4_cleanup_sched(adapter);
t4_free_mem(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
@@ -4845,21 +4494,59 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
+static void dummy_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_NONE;
+ dev->mtu = 0;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ /* Initialize the device structure. */
+ dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
+ dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
+ dev->destructor = free_netdev;
+}
+
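+/* A minimal management netdev is registered for the PF so that the
+ * ndo_set_vf_mac/ndo_get_vf_config callbacks have an interface to hang
+ * off, even though the PF itself carries no traffic.
+ */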
+static int config_mgmt_dev(struct pci_dev *pdev)
+{
+ struct adapter *adap = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ int err;
+
+ snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
+ netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return err;
+ }
+ return 0;
+}
+
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
+ struct adapter *adap = pci_get_drvdata(pdev);
int err = 0;
int current_vfs = pci_num_vf(pdev);
u32 pcie_fw;
- void __iomem *regs;
-
- regs = pci_ioremap_bar(pdev, 0);
- if (!regs) {
- dev_err(&pdev->dev, "cannot map device registers\n");
- return -ENOMEM;
- }
- pcie_fw = readl(regs + PCIE_FW_A);
- iounmap(regs);
+ pcie_fw = readl(adap->regs + PCIE_FW_A);
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
@@ -4886,6 +4573,14 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
*/
if (!num_vfs) {
pci_disable_sriov(pdev);
+ if (adap->port[0]) {
+ unregister_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ }
+ /* free VF resources */
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ adap->num_vfs = 0;
return num_vfs;
}
@@ -4893,7 +4588,17 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err)
return err;
+
+ adap->num_vfs = num_vfs;
+ err = config_mgmt_dev(pdev);
+ if (err)
+ return err;
}
+
+ adap->vfinfo = kcalloc(adap->num_vfs,
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (adap->vfinfo)
+ fill_vf_station_mac_addr(adap);
return num_vfs;
}
#endif
@@ -4904,9 +4609,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
+ struct net_device *netdev;
void __iomem *regs;
u32 whoami, pl_rev;
enum chip_type chip;
+ static int adap_idx = 1;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4941,7 +4648,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (func != ent->driver_data) {
+#ifndef CONFIG_PCI_IOV
iounmap(regs);
+#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
@@ -4973,6 +4682,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_unmap_bar0;
}
+ adap_idx++;
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
@@ -5059,8 +4769,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
T6_STATMODE_V(0)));
for_each_port(adapter, i) {
- struct net_device *netdev;
-
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
if (!netdev) {
@@ -5080,7 +4788,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_TC;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5154,10 +4863,26 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
#endif
- if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
+ if (!pi->sched_tbl)
+ dev_warn(&pdev->dev,
+ "could not activate scheduling on port %d\n",
+ i);
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
adapter->params.offload = 0;
+ } else {
+ adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
+ CXGB4_MAX_LINK_HANDLE);
+ if (!adapter->tc_u32)
+ dev_warn(&pdev->dev,
+ "could not offload tc u32, continuing\n");
}
if (is_offload(adapter)) {
@@ -5179,8 +4904,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
- else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ else if (msi > 0 && pci_enable_msi(pdev) == 0) {
adapter->flags |= USING_MSI;
+ if (msi > 1)
+ free_msix_info(adapter);
+ }
/* check for PCI Express bandwidth capabiltites */
cxgb4_check_pcie_caps(adapter);
@@ -5224,10 +4952,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_offload(adapter))
- attach_ulds(adapter);
+ if (is_uld(adapter)) {
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adapter->list_node, &adapter_list);
+ mutex_unlock(&uld_mutex);
+ }
print_adapter_info(adapter);
+ setup_fw_sge_queues(adapter);
+ return 0;
sriov:
#ifdef CONFIG_PCI_IOV
@@ -5241,11 +4974,48 @@ sriov:
"instantiated %u virtual functions\n",
num_vf[func]);
}
-#endif
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto free_pci_region;
+ }
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->regs = regs;
+ adapter->adap_idx = adap_idx;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto free_adapter;
+ }
+ pci_set_drvdata(pdev, adapter);
+ return 0;
+
+ free_adapter:
+ kfree(adapter);
+ free_pci_region:
+ iounmap(regs);
+ pci_disable_sriov(pdev);
+ pci_release_regions(pdev);
+ return err;
+#else
return 0;
+#endif
out_free_dev:
free_some_resources(adapter);
+ if (adapter->flags & USING_MSIX)
+ free_msix_info(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -5269,12 +5039,12 @@ static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
-#ifdef CONFIG_PCI_IOV
- pci_disable_sriov(pdev);
-
-#endif
+ if (!adapter) {
+ pci_release_regions(pdev);
+ return;
+ }
- if (adapter) {
+ if (adapter->pf == 4) {
int i;
/* Tear down per-adapter Work Queue first since it can contain
@@ -5282,7 +5052,7 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_offload(adapter))
+ if (is_uld(adapter))
detach_ulds(adapter);
disable_interrupts(adapter);
@@ -5296,17 +5066,15 @@ static void remove_one(struct pci_dev *pdev)
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
- if (adapter->tids.ftid_tab) {
- struct filter_entry *f = &adapter->tids.ftid_tab[0];
- for (i = 0; i < (adapter->tids.nftids +
- adapter->tids.nsftids); i++, f++)
- if (f->valid)
- clear_filter(adapter, f);
- }
+ clear_all_filters(adapter);
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
+ if (adapter->flags & USING_MSIX)
+ free_msix_info(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
@@ -5323,8 +5091,64 @@ static void remove_one(struct pci_dev *pdev)
kfree(adapter->mbox_log);
synchronize_rcu();
kfree(adapter);
- } else
+ }
+#ifdef CONFIG_PCI_IOV
+ else {
+ if (adapter->port[0])
+ unregister_netdev(adapter->port[0]);
+ iounmap(adapter->regs);
+ kfree(adapter->vfinfo);
+ kfree(adapter);
+ pci_disable_sriov(pdev);
+ pci_release_regions(pdev);
+ }
+#endif
+}
+
+/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
+ * delivery. This is essentially a stripped-down version of the PCI remove()
+ * function where we do the minimal amount of work necessary to shut down any
+ * further activity.
+ */
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+	/* As with remove_one() above (see extended comment), we only want to
+	 * do cleanup on PCI devices which went all the way through init_one()
+ * ...
+ */
+ if (!adapter) {
+ pci_release_regions(pdev);
+ return;
+ }
+
+ if (adapter->pf == 4) {
+ int i;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
+ disable_interrupts(adapter);
+ disable_msi(adapter);
+
+ t4_sge_stop(adapter);
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ }
+#ifdef CONFIG_PCI_IOV
+ else {
+ if (adapter->port[0])
+ unregister_netdev(adapter->port[0]);
+ iounmap(adapter->regs);
+ kfree(adapter->vfinfo);
+ kfree(adapter);
+ pci_disable_sriov(pdev);
pci_release_regions(pdev);
+ }
+#endif
}
static struct pci_driver cxgb4_driver = {
@@ -5332,7 +5156,7 @@ static struct pci_driver cxgb4_driver = {
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
- .shutdown = remove_one,
+ .shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
.sriov_configure = cxgb4_iov_configure,
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
new file mode 100644
index 000000000000..49d2debb334e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -0,0 +1,483 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "cxgb4.h"
+#include "cxgb4_tc_u32_parse.h"
+#include "cxgb4_tc_u32.h"
+
+/* Fill ch_filter_specification with parsed match value/mask pair. */
+static int fill_match_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls,
+ const struct cxgb4_match_field *entry,
+ bool next_header)
+{
+ unsigned int i, j;
+ u32 val, mask;
+ int off, err;
+ bool found;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ mask = cls->knode.sel->keys[i].mask;
+
+ if (next_header) {
+ /* For next headers, parse only keys with offmask */
+ if (!cls->knode.sel->keys[i].offmask)
+ continue;
+ } else {
+ /* For the remaining, parse only keys without offmask */
+ if (cls->knode.sel->keys[i].offmask)
+ continue;
+ }
+
+ found = false;
+
+ for (j = 0; entry[j].val; j++) {
+ if (off == entry[j].off) {
+ found = true;
+ err = entry[j].val(fs, val, mask);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Fill ch_filter_specification with parsed action. */
+static int fill_action_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls)
+{
+ unsigned int num_actions = 0;
+ const struct tc_action *a;
+ struct tcf_exts *exts;
+ LIST_HEAD(actions);
+
+ exts = cls->knode.exts;
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ /* Don't allow more than one action per rule. */
+ if (num_actions)
+ return -EINVAL;
+
+ /* Drop in hardware. */
+ if (is_tcf_gact_shot(a)) {
+ fs->action = FILTER_DROP;
+ num_actions++;
+ continue;
+ }
+
+ /* Re-direct to specified port in hardware. */
+ if (is_tcf_mirred_redirect(a)) {
+ struct net_device *n_dev;
+ unsigned int i, index;
+ bool found = false;
+
+ index = tcf_mirred_ifindex(a);
+ for_each_port(adap, i) {
+ n_dev = adap->port[i];
+ if (index == n_dev->ifindex) {
+ fs->action = FILTER_SWITCH;
+ fs->eport = i;
+ found = true;
+ break;
+ }
+ }
+
+ /* Interface doesn't belong to any port of
+ * the underlying hardware.
+ */
+ if (!found)
+ return -EINVAL;
+
+ num_actions++;
+ continue;
+ }
+
+ /* Un-supported action. */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ const struct cxgb4_match_field *start, *link_start = NULL;
+ struct adapter *adapter = netdev2adap(dev);
+ struct ch_filter_specification fs;
+ struct cxgb4_tc_u32_table *t;
+ struct cxgb4_link *link;
+ unsigned int filter_id;
+ u32 uhtid, link_uhtid;
+ bool is_ipv6 = false;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to insert the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for insertion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+	 * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Ensure link handle uhtid is sane, if specified. */
+ if (link_uhtid >= t->size)
+ return -EINVAL;
+
+ memset(&fs, 0, sizeof(fs));
+
+ if (protocol == htons(ETH_P_IPV6)) {
+ start = cxgb4_ipv6_fields;
+ is_ipv6 = true;
+ } else {
+ start = cxgb4_ipv4_fields;
+ is_ipv6 = false;
+ }
+
+ if (uhtid != 0x800) {
+ /* Link must exist from root node before insertion. */
+ if (!t->table[uhtid - 1].link_handle)
+ return -EINVAL;
+
+ /* Link must have a valid supported next header. */
+ link_start = t->table[uhtid - 1].match_field;
+ if (!link_start)
+ return -EINVAL;
+ }
+
+ /* Parse links and record them for subsequent jumps to valid
+ * next headers.
+ */
+ if (link_uhtid) {
+ const struct cxgb4_next_header *next;
+ bool found = false;
+ unsigned int i, j;
+ u32 val, mask;
+ int off;
+
+ if (t->table[link_uhtid - 1].link_handle) {
+ dev_err(adapter->pdev_dev,
+ "Link handle exists for: 0x%x\n",
+ link_uhtid);
+ return -EINVAL;
+ }
+
+ next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
+
+ /* Try to find matches that allow jumps to next header. */
+ for (i = 0; next[i].jump; i++) {
+ if (next[i].offoff != cls->knode.sel->offoff ||
+ next[i].shift != cls->knode.sel->offshift ||
+ next[i].mask != cls->knode.sel->offmask ||
+ next[i].offset != cls->knode.sel->off)
+ continue;
+
+ /* Found a possible candidate. Find a key that
+ * matches the corresponding offset, value, and
+ * mask to jump to next header.
+ */
+ for (j = 0; j < cls->knode.sel->nkeys; j++) {
+ off = cls->knode.sel->keys[j].off;
+ val = cls->knode.sel->keys[j].val;
+ mask = cls->knode.sel->keys[j].mask;
+
+ if (next[i].match_off == off &&
+ next[i].match_val == val &&
+ next[i].match_mask == mask) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue; /* Try next candidate. */
+
+ /* Candidate to jump to next header found.
+ * Translate all keys to internal specification
+ * and store them in jump table. This spec is copied
+ * later to set the actual filters.
+ */
+ ret = fill_match_fields(adapter, &fs, cls,
+ start, false);
+ if (ret)
+ goto out;
+
+ link = &t->table[link_uhtid - 1];
+ link->match_field = next[i].jump;
+ link->link_handle = cls->knode.handle;
+ memcpy(&link->fs, &fs, sizeof(fs));
+ break;
+ }
+
+ /* No candidate found to jump to next header. */
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* Fill ch_filter_specification match fields to be shipped to hardware.
+ * Copy the linked spec (if any) first. And then update the spec as
+ * needed.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
+ /* Copy linked ch_filter_specification */
+ memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
+ ret = fill_match_fields(adapter, &fs, cls,
+ link_start, true);
+ if (ret)
+ goto out;
+ }
+
+ ret = fill_match_fields(adapter, &fs, cls, start, false);
+ if (ret)
+ goto out;
+
+ /* Fill ch_filter_specification action fields to be shipped to
+ * hardware.
+ */
+ ret = fill_action_fields(adapter, &fs, cls);
+ if (ret)
+ goto out;
+
+ /* The filter spec has been completely built from the info
+ * provided from u32. We now set some default fields in the
+ * spec for sanity.
+ */
+
+ /* Match only packets coming from the ingress port where this
+ * filter will be created.
+ */
+ fs.val.iport = netdev2pinfo(dev)->port_id;
+ fs.mask.iport = ~0;
+
+ /* Enable filter hit counts. */
+ fs.hitcnts = 1;
+
+ /* Set type of filter - IPv6 or IPv4 */
+ fs.type = is_ipv6 ? 1 : 0;
+
+ /* Set the filter */
+ ret = cxgb4_set_filter(dev, filter_id, &fs);
+ if (ret)
+ goto out;
+
+ /* If this is a linked bucket, then set the corresponding
+ * entry in the bitmap to mark it as belonging to this linked
+ * bucket.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
+ set_bit(filter_id, t->table[uhtid - 1].tid_map);
+
+out:
+ return ret;
+}
+
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int filter_id, max_tids, i, j;
+ struct cxgb4_link *link = NULL;
+ struct cxgb4_tc_u32_table *t;
+ u32 handle, uhtid;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to delete the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for deletion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ handle = cls->knode.handle;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+	 * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Delete the specified filter */
+ if (uhtid != 0x800) {
+ link = &t->table[uhtid - 1];
+ if (!link->link_handle)
+ return -EINVAL;
+
+ if (!test_bit(filter_id, link->tid_map))
+ return -EINVAL;
+ }
+
+ ret = cxgb4_del_filter(dev, filter_id);
+ if (ret)
+ goto out;
+
+ if (link)
+ clear_bit(filter_id, link->tid_map);
+
+ /* If a link is being deleted, then delete all filters
+ * associated with the link.
+ */
+ max_tids = adapter->tids.nftids;
+ for (i = 0; i < t->size; i++) {
+ link = &t->table[i];
+
+ if (link->link_handle == handle) {
+ for (j = 0; j < max_tids; j++) {
+ if (!test_bit(j, link->tid_map))
+ continue;
+
+ ret = __cxgb4_del_filter(dev, j, NULL);
+ if (ret)
+ goto out;
+
+ clear_bit(j, link->tid_map);
+ }
+
+ /* Clear the link state */
+ link->match_field = NULL;
+ link->link_handle = 0;
+ memset(&link->fs, 0, sizeof(link->fs));
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+void cxgb4_cleanup_tc_u32(struct adapter *adap)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!adap->tc_u32)
+ return;
+
+ /* Free up all allocated memory. */
+ t = adap->tc_u32;
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ t4_free_mem(link->tid_map);
+ }
+ t4_free_mem(adap->tc_u32);
+}
+
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!size)
+ return NULL;
+
+ t = t4_alloc_mem(sizeof(*t) +
+ (size * sizeof(struct cxgb4_link)));
+ if (!t)
+ return NULL;
+
+ t->size = size;
+
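+	/* Each link gets a bitmap sized for every possible filter tid so
+	 * that filters inserted through the link can be torn down when the
+	 * link itself is deleted.
+	 */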
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+ unsigned int bmap_size;
+ unsigned int max_tids;
+
+ max_tids = adap->tids.nftids;
+ bmap_size = BITS_TO_LONGS(max_tids);
+ link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
+ if (!link->tid_map)
+ goto out_no_mem;
+ bitmap_zero(link->tid_map, max_tids);
+ }
+
+ return t;
+
+out_no_mem:
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ if (link->tid_map)
+ t4_free_mem(link->tid_map);
+ }
+
+	t4_free_mem(t);
+
+ return NULL;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
new file mode 100644
index 000000000000..6bdc885eff22
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_H
+#define __CXGB4_TC_U32_H
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_MAX_LINK_HANDLE 32
+
+static inline bool can_tc_u32_offload(struct net_device *dev)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+	return (dev->features & NETIF_F_HW_TC) && adap->tc_u32;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+
+void cxgb4_cleanup_tc_u32(struct adapter *adapter);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size);
+#endif /* __CXGB4_TC_U32_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
new file mode 100644
index 000000000000..a4b99edcc339
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -0,0 +1,294 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_PARSE_H
+#define __CXGB4_TC_U32_PARSE_H
+
+struct cxgb4_match_field {
+ int off; /* Offset from the beginning of the header to match */
+ /* Fill the value/mask pair in the spec if matched */
+ int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+};
+
+/* IPv4 match fields */
+static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
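+/* Only two fragment predicates can be expressed in the single frag bit
+ * of the filter spec: a key that sets MF (more fragments) matches
+ * fragmented packets, and one that sets DF (don't fragment) matches
+ * only unfragmented packets; anything else is rejected.
+ */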
+static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ u32 mask_val;
+ u8 frag_val;
+
+ frag_val = (ntohl(val) >> 13) & 0x00000007;
+ mask_val = ntohl(mask) & 0x0000FFFF;
+
+ if (frag_val == 0x1 && mask_val != 0x3FFF) { /* MF set */
+ f->val.frag = 1;
+ f->mask.frag = 1;
+ } else if (frag_val == 0x2 && mask_val != 0x3FFF) { /* DF set */
+ f->val.frag = 0;
+ f->mask.frag = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv4_tos },
+ { .off = 4, .val = cxgb4_fill_ipv4_frag },
+ { .off = 8, .val = cxgb4_fill_ipv4_proto },
+ { .off = 12, .val = cxgb4_fill_ipv4_src_ip },
+ { .off = 16, .val = cxgb4_fill_ipv4_dst_ip },
+ { .val = NULL }
+};
+
+/* IPv6 match fields */
+static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[4], &val, sizeof(u32));
+ memcpy(&f->mask.fip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[8], &val, sizeof(u32));
+ memcpy(&f->mask.fip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[12], &val, sizeof(u32));
+ memcpy(&f->mask.fip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[4], &val, sizeof(u32));
+ memcpy(&f->mask.lip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[8], &val, sizeof(u32));
+ memcpy(&f->mask.lip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[12], &val, sizeof(u32));
+ memcpy(&f->mask.lip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv6_tos },
+ { .off = 4, .val = cxgb4_fill_ipv6_proto },
+ { .off = 8, .val = cxgb4_fill_ipv6_src_ip0 },
+ { .off = 12, .val = cxgb4_fill_ipv6_src_ip1 },
+ { .off = 16, .val = cxgb4_fill_ipv6_src_ip2 },
+ { .off = 20, .val = cxgb4_fill_ipv6_src_ip3 },
+ { .off = 24, .val = cxgb4_fill_ipv6_dst_ip0 },
+ { .off = 28, .val = cxgb4_fill_ipv6_dst_ip1 },
+ { .off = 32, .val = cxgb4_fill_ipv6_dst_ip2 },
+ { .off = 36, .val = cxgb4_fill_ipv6_dst_ip3 },
+ { .val = NULL }
+};
+
+/* TCP/UDP match */
+static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.fport = ntohl(val) >> 16;
+ f->mask.fport = ntohl(mask) >> 16;
+ f->val.lport = ntohl(val) & 0x0000FFFF;
+ f->mask.lport = ntohl(mask) & 0x0000FFFF;
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_tcp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+static const struct cxgb4_match_field cxgb4_udp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+struct cxgb4_next_header {
+ unsigned int offset; /* Offset to next header */
+	/* The variable part of the offset is extracted from the packet
+	 * word at "offoff", masked and shifted as below, and added to
+	 * "offset" above to reach the next header. Useful when a header
+	 * field's value determines the jump, such as the IHL field in
+	 * the IPv4 header.
+	 */
+ unsigned int offoff;
+ u32 shift;
+ u32 mask;
+ /* match criteria to make this jump */
+ unsigned int match_off;
+ u32 match_val;
+ u32 match_mask;
+ /* location of jump to make */
+ const struct cxgb4_match_field *jump;
+};
+
+/* Accept a rule with a jump to transport layer header based on IHL field in
+ * IPv4 header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
+
+/* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
+ * to get to transport layer header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
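+
+/* In both jump tables the match_{off,val,mask} triplets key on the IP
+ * protocol / next-header field (6 = TCP, 0x11 = UDP), so a rule may only
+ * jump when it also matches the transport protocol explicitly.
+ */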
+
+struct cxgb4_link {
+ const struct cxgb4_match_field *match_field; /* Next header */
+ struct ch_filter_specification fs; /* Match spec associated with link */
+ u32 link_handle; /* Knode handle associated with the link */
+ unsigned long *tid_map; /* Bitmap for filter tids */
+};
+
+struct cxgb4_tc_u32_table {
+ unsigned int size; /* number of entries in table */
+ struct cxgb4_link table[0]; /* Jump table */
+};
+#endif /* __CXGB4_TC_U32_PARSE_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
new file mode 100644
index 000000000000..b4b2d20aab3c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -0,0 +1,696 @@
+/*
+ * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+#include "t4_msg.h"
+
+#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
+
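+/* Hand out a free MSI-X index from the ULD bitmap, or -ENOSPC when every
+ * reserved vector is already claimed; the paired routine below simply
+ * clears the bit again.
+ */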
+static int get_msix_idx_from_bmap(struct adapter *adap)
+{
+ struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
+ unsigned long flags;
+ unsigned int msix_idx;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
+ if (msix_idx < bmap->mapsize) {
+ __set_bit(msix_idx, bmap->msix_bmap);
+ } else {
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return -ENOSPC;
+ }
+
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return msix_idx;
+}
+
+static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
+{
+ struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ __clear_bit(msix_idx, bmap->msix_bmap);
+ spin_unlock_irqrestore(&bmap->lock, flags);
+}
+
+/* Flush the aggregated LRO sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ struct adapter *adap = q->adap;
+
+ if (adap->uld[q->uld].lro_flush)
+ adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD, we just maintain statistics.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct adapter *adap = q->adap;
+ struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+ int ret;
+
+ /* FW can send CPLs encapsulated in a CPL_FW4_MSG */
+ if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
+ rsp += 2;
+
+ if (q->flush_handler)
+ ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
+ rsp, gl, &q->lro_mgr,
+ &q->napi);
+ else
+ ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
+ rsp, gl);
+
+ if (ret) {
+ rxq->stats.nomem++;
+ return -1;
+ }
+
+ if (!gl)
+ rxq->stats.imm++;
+ else if (gl == CXGB4_MSG_AN)
+ rxq->stats.an++;
+ else
+ rxq->stats.pkts++;
+ return 0;
+}
+
+static int alloc_uld_rxqs(struct adapter *adap,
+ struct sge_uld_rxq_info *rxq_info,
+ unsigned int nq, unsigned int offset, bool lro)
+{
+ struct sge *s = &adap->sge;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
+ unsigned short *ids = rxq_info->rspq_id + offset;
+ unsigned int per_chan = nq / adap->params.nports;
+ unsigned int bmap_idx = 0;
+ int i, err, msi_idx;
+
+ if (adap->flags & USING_MSIX)
+ msi_idx = 1;
+ else
+ msi_idx = -((int)s->intrq.abs_id + 1);
+
+ for (i = 0; i < nq; i++, q++) {
+ if (msi_idx >= 0) {
+ err = get_msix_idx_from_bmap(adap);
+ if (err < 0)
+ goto freeout;
+ bmap_idx = err;
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+ }
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+ adap->port[i / per_chan],
+ msi_idx,
+ q->fl.size ? &q->fl : NULL,
+ uldrx_handler,
+ lro ? uldrx_flush_handler : NULL,
+ 0);
+ if (err)
+ goto freeout;
+ if (msi_idx >= 0)
+ rxq_info->msix_tbl[i + offset] = bmap_idx;
+ memset(&q->stats, 0, sizeof(q->stats));
+ if (ids)
+ ids[i] = q->rspq.abs_id;
+ }
+ return 0;
+freeout:
+ q = rxq_info->uldrxq + offset;
+ for ( ; i; i--, q++) {
+ if (q->rspq.desc)
+ free_rspq_fl(adap, &q->rspq,
+ q->fl.size ? &q->fl : NULL);
+ }
+
+ /* If this was the ciq pass failing (offset != 0), the rxq's set up
+ * by the first pass must be freed as well.
+ */
+ if (offset) {
+ q = rxq_info->uldrxq;
+ for (i = offset; i; i--, q++) {
+ if (q->rspq.desc)
+ free_rspq_fl(adap, &q->rspq,
+ q->fl.size ? &q->fl : NULL);
+ }
+ }
+ return err;
+}
+
+static int
+setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int i, ret = 0;
+
+ if (adap->flags & USING_MSIX) {
+ rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+ sizeof(unsigned short),
+ GFP_KERNEL);
+ if (!rxq_info->msix_tbl)
+ return -ENOMEM;
+ }
+
+ /* Allocate the rxq's first, then the ciq's stacked behind them;
+ * propagate the first failure's error code instead of a bare 1.
+ */
+ ret = alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro);
+ if (!ret)
+ ret = alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
+ rxq_info->nrxq, lro);
+
+ /* Tell uP to route control queue completions to rdma rspq */
+ if (adap->flags & FULL_INIT_DONE &&
+ !ret && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ unsigned int cmplqid;
+ u32 param, cmdop;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+ return ret;
+}
+
+static void t4_free_uld_rxqs(struct adapter *adap, int n,
+ struct sge_ofld_rxq *q)
+{
+ for ( ; n; n--, q++) {
+ if (q->rspq.desc)
+ free_rspq_fl(adap, &q->rspq,
+ q->fl.size ? &q->fl : NULL);
+ }
+}
+
+static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+ if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ u32 param, cmdop, cmplqid = 0;
+ int i;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+
+ if (rxq_info->nciq)
+ t4_free_uld_rxqs(adap, rxq_info->nciq,
+ rxq_info->uldrxq + rxq_info->nrxq);
+ t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
+ if (adap->flags & USING_MSIX)
+ kfree(rxq_info->msix_tbl);
+}
+
+static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
+ const struct cxgb4_uld_info *uld_info)
+{
+ struct sge *s = &adap->sge;
+ struct sge_uld_rxq_info *rxq_info;
+ int i, nrxq, ciq_size;
+
+ rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+ if (!rxq_info)
+ return -ENOMEM;
+
+ if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ i = s->nqs_per_uld;
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ } else {
+ i = min_t(int, uld_info->nrxq,
+ num_online_cpus());
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ }
+ if (!uld_info->ciq) {
+ rxq_info->nciq = 0;
+ } else {
+ if (adap->flags & USING_MSIX)
+ rxq_info->nciq = min_t(int, s->nqs_per_uld,
+ num_online_cpus());
+ else
+ rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+ num_online_cpus());
+ rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+ adap->params.nports);
+ rxq_info->nciq = max_t(int, rxq_info->nciq,
+ adap->params.nports);
+ }
+
+ nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
+ rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
+ GFP_KERNEL);
+ if (!rxq_info->uldrxq) {
+ kfree(rxq_info);
+ return -ENOMEM;
+ }
+
+ rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
+ if (!rxq_info->rspq_id) {
+ kfree(rxq_info->uldrxq);
+ kfree(rxq_info);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rxq_info->nrxq; i++) {
+ struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
+ r->rspq.uld = uld_type;
+ r->fl.size = 72;
+ }
+
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
+ for (i = rxq_info->nrxq; i < nrxq; i++) {
+ struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+ r->rspq.uld = uld_type;
+ }
+
+ strlcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
+ adap->sge.uld_rxq_info[uld_type] = rxq_info;
+
+ return 0;
+}
+
+static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+ kfree(rxq_info->rspq_id);
+ kfree(rxq_info->uldrxq);
+ kfree(rxq_info);
+}
+
+static int
+request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int err = 0;
+ unsigned int idx, bmap_idx;
+
+ for_each_uldrxq(rxq_info, idx) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+ err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info_ulds[bmap_idx].desc,
+ &rxq_info->uldrxq[idx].rspq);
+ if (err)
+ goto unwind;
+ }
+ return 0;
+unwind:
+ while (idx-- > 0) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+ free_msix_idx_in_bmap(adap, bmap_idx);
+ free_irq(adap->msix_info_ulds[bmap_idx].vec,
+ &rxq_info->uldrxq[idx].rspq);
+ }
+ return err;
+}
+
+static void
+free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ unsigned int idx, bmap_idx;
+
+ for_each_uldrxq(rxq_info, idx) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+
+ free_msix_idx_in_bmap(adap, bmap_idx);
+ free_irq(adap->msix_info_ulds[bmap_idx].vec,
+ &rxq_info->uldrxq[idx].rspq);
+ }
+}
+
+static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int n = sizeof(adap->msix_info_ulds[0].desc);
+ unsigned int idx, bmap_idx;
+
+ for_each_uldrxq(rxq_info, idx) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+
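+ /* e.g. "eth0-rdma0" for the first RDMA queue (names
+ * illustrative; the port and the registering ULD supply
+ * the real strings).
+ */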
+ snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
+ adap->port[0]->name, rxq_info->name, idx);
+ }
+}
+
+static void enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (!q)
+ return;
+
+ if (q->handler) {
+ cxgb_busy_poll_init_lock(q);
+ napi_enable(&q->napi);
+ }
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
+}
+
+static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (q && q->handler) {
+ napi_disable(&q->napi);
+ local_bh_disable();
+ while (!cxgb_poll_lock_napi(q))
+ mdelay(1);
+ local_bh_enable();
+ }
+}
+
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int idx;
+
+ for_each_uldrxq(rxq_info, idx)
+ enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
+}
+
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int idx;
+
+ for_each_uldrxq(rxq_info, idx)
+ quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+}
+
+static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
+ struct cxgb4_lld_info *lli)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+ lli->rxq_ids = rxq_info->rspq_id;
+ lli->nrxq = rxq_info->nrxq;
+ lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
+ lli->nciq = rxq_info->nciq;
+}
+
+int t4_uld_mem_alloc(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+
+ adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
+ if (!adap->uld)
+ return -ENOMEM;
+
+ s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
+ sizeof(struct sge_uld_rxq_info *),
+ GFP_KERNEL);
+ if (!s->uld_rxq_info)
+ goto err_uld;
+
+ return 0;
+err_uld:
+ kfree(adap->uld);
+ return -ENOMEM;
+}
+
+void t4_uld_mem_free(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+
+ kfree(s->uld_rxq_info);
+ kfree(adap->uld);
+}
+
+void t4_uld_clean_up(struct adapter *adap)
+{
+ struct sge_uld_rxq_info *rxq_info;
+ unsigned int i;
+
+ if (!adap->uld)
+ return;
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ if (!adap->uld[i].handle)
+ continue;
+ rxq_info = adap->sge.uld_rxq_info[i];
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, i);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, i);
+ free_sge_queues_uld(adap, i);
+ free_queues_uld(adap, i);
+ }
+}
+
+static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
+{
+ int i;
+
+ lld->pdev = adap->pdev;
+ lld->pf = adap->pf;
+ lld->l2t = adap->l2t;
+ lld->tids = &adap->tids;
+ lld->ports = adap->port;
+ lld->vr = &adap->vres;
+ lld->mtus = adap->params.mtus;
+ lld->ntxq = adap->sge.ofldqsets;
+ lld->nchan = adap->params.nports;
+ lld->nports = adap->params.nports;
+ lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lld->iscsi_ppm = &adap->iscsi_ppm;
+ lld->adapter_type = adap->params.chip;
+ lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
+ lld->udb_density = 1 << adap->params.sge.eq_qpp;
+ lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+ lld->filt_mode = adap->params.tp.vlan_pri_map;
+ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+ for (i = 0; i < NCHAN; i++)
+ lld->tx_modq[i] = i;
+ lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+ lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
+ lld->fw_vers = adap->params.fw_vers;
+ lld->dbfifo_int_thresh = dbfifo_int_thresh;
+ lld->sge_ingpadboundary = adap->sge.fl_align;
+ lld->sge_egrstatuspagesize = adap->sge.stat_len;
+ lld->sge_pktshift = adap->sge.pktshift;
+ lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lld->max_ordird_qp = adap->params.max_ordird_qp;
+ lld->max_ird_adapter = adap->params.max_ird_adapter;
+ lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+ lld->nodeid = dev_to_node(adap->pdev_dev);
+}
+
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+ void *handle;
+ struct cxgb4_lld_info lli;
+
+ uld_init(adap, &lli);
+ uld_queue_init(adap, uld, &lli);
+
+ handle = adap->uld[uld].add(&lli);
+ if (IS_ERR(handle)) {
+ dev_warn(adap->pdev_dev,
+ "could not attach to the %s driver, error %ld\n",
+ adap->uld[uld].name, PTR_ERR(handle));
+ return;
+ }
+
+ adap->uld[uld].handle = handle;
+ t4_register_netevent_notifier();
+
+ if (adap->flags & FULL_INIT_DONE)
+ adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+}
+
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+ unsigned int adap_idx = 0;
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ ret = cfg_queues_uld(adap, type, p);
+ if (ret)
+ goto out;
+ ret = setup_sge_queues_uld(adap, type, p->lro);
+ if (ret)
+ goto free_queues;
+ if (adap->flags & USING_MSIX) {
+ name_msix_vecs_uld(adap, type);
+ ret = request_msix_queue_irqs_uld(adap, type);
+ if (ret)
+ goto free_rxq;
+ }
+ if (adap->flags & FULL_INIT_DONE)
+ enable_rx_uld(adap, type);
+ if (adap->uld[type].add) {
+ ret = -EBUSY;
+ goto free_irq;
+ }
+ adap->uld[type] = *p;
+ uld_attach(adap, type);
+ adap_idx++;
+ }
+ mutex_unlock(&uld_mutex);
+ return 0;
+
+free_irq:
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+ free_sge_queues_uld(adap, type);
+free_queues:
+ free_queues_uld(adap, type);
+out:
+
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ if (!adap_idx)
+ break;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ adap_idx--;
+ }
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
+
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ }
+ mutex_unlock(&uld_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index f3c58aaa932d..47bd14f602db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -32,8 +32,8 @@
* SOFTWARE.
*/
-#ifndef __CXGB4_OFLD_H
-#define __CXGB4_OFLD_H
+#ifndef __CXGB4_ULD_H
+#define __CXGB4_ULD_H
#include <linux/cache.h>
#include <linux/spinlock.h>
@@ -42,6 +42,8 @@
#include <linux/atomic.h>
#include "cxgb4.h"
+#define MAX_ULD_QSETS 16
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
@@ -104,6 +106,7 @@ struct tid_info {
unsigned int atid_base;
struct filter_entry *ftid_tab;
+ unsigned long *ftid_bmap;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -124,6 +127,8 @@ struct tid_info {
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
+ /* lock for setting/clearing filter bitmap */
+ spinlock_t ftid_lock;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -183,15 +188,38 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
+/* Filter operation context to allow callers of cxgb4_set_filter() and
+ * cxgb4_del_filter() to wait for an asynchronous completion.
+ */
+struct filter_ctx {
+ struct completion completion; /* completion rendezvous */
+ void *closure; /* caller's opaque information */
+ int result; /* result of operation */
+ u32 tid; /* to store tid */
+};
+
+struct ch_filter_specification;
+
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx);
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs);
+int cxgb4_del_filter(struct net_device *dev, int filter_id);
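+
+/* Illustrative use of the asynchronous variants above (a sketch only;
+ * the ch_filter_specification contents are the caller's to fill in):
+ *
+ *	struct filter_ctx ctx;
+ *	int ret;
+ *
+ *	init_completion(&ctx.completion);
+ *	ret = __cxgb4_set_filter(dev, filter_id, &fs, &ctx);
+ *	if (!ret) {
+ *		wait_for_completion(&ctx.completion);
+ *		ret = ctx.result;
+ *	}
+ */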
+
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
}
enum cxgb4_uld {
+ CXGB4_ULD_INIT,
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
+ CXGB4_ULD_CRYPTO,
CXGB4_ULD_MAX
};
@@ -284,6 +312,11 @@ struct cxgb4_lld_info {
struct cxgb4_uld_info {
const char *name;
+ void *handle;
+ unsigned int nrxq;
+ unsigned int rxq_size;
+ bool ciq;
+ bool lro;
void *(*add)(const struct cxgb4_lld_info *p);
int (*rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl);
@@ -330,4 +363,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
-#endif /* !__CXGB4_OFLD_H */
+#endif /* !__CXGB4_ULD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
new file mode 100644
index 000000000000..539de764bbd3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -0,0 +1,556 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "cxgb4.h"
+#include "sched.h"
+
+/* Spinlock must be held by caller */
+static int t4_sched_class_fw_cmd(struct port_info *pi,
+ struct ch_sched_params *p,
+ enum sched_fw_ops op)
+{
+ struct adapter *adap = pi->adapter;
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+ int err = 0;
+
+ e = &s->tab[p->u.params.class];
+ switch (op) {
+ case SCHED_FW_OP_ADD:
+ err = t4_sched_params(adap, p->type,
+ p->u.params.level, p->u.params.mode,
+ p->u.params.rateunit,
+ p->u.params.ratemode,
+ p->u.params.channel, e->idx,
+ p->u.params.minrate, p->u.params.maxrate,
+ p->u.params.weight, p->u.params.pktsize);
+ break;
+ default:
+ err = -ENOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/* Spinlock must be held by caller */
+static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
+ enum sched_bind_type type, bool bind)
+{
+ struct adapter *adap = pi->adapter;
+ u32 fw_mnem, fw_class, fw_param;
+ unsigned int pf = adap->pf;
+ unsigned int vf = 0;
+ int err = 0;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ qe = (struct sched_queue_entry *)arg;
+
+ /* Create a template for the FW_PARAMS_CMD mnemonic and
+ * value (TX Scheduling Class in this case).
+ */
+ fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
+ fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
+ fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));
+
+ pf = adap->pf;
+ vf = 0;
+ break;
+ }
+ default:
+ err = -ENOTSUPP;
+ goto out;
+ }
+
+ err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
+
+out:
+ return err;
+}
+
+static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
+ const unsigned int qid,
+ int *index)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e, *end;
+ struct sched_class *found = NULL;
+ int i;
+
+ /* Look for a class with matching bound queue parameters */
+ end = &s->tab[s->sched_size];
+ for (e = &s->tab[0]; e != end; ++e) {
+ struct sched_queue_entry *qe;
+
+ i = 0;
+ if (e->state == SCHED_STATE_UNUSED)
+ continue;
+
+ list_for_each_entry(qe, &e->queue_list, list) {
+ if (qe->cntxt_id == qid) {
+ found = e;
+ if (index)
+ *index = i;
+ break;
+ }
+ i++;
+ }
+
+ if (found)
+ break;
+ }
+
+ return found;
+}
+
+static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
+{
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ struct sched_queue_entry *qe = NULL;
+ struct sge_eth_txq *txq;
+ unsigned int qid;
+ int index = -1;
+ int err = 0;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return -ERANGE;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qid = txq->q.cntxt_id;
+
+ /* Find the existing class that the queue is bound to */
+ e = t4_sched_queue_lookup(pi, qid, &index);
+ if (e && index >= 0) {
+ int i = 0;
+
+ spin_lock(&e->lock);
+ list_for_each_entry(qe, &e->queue_list, list) {
+ if (i == index)
+ break;
+ i++;
+ }
+ err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
+ false);
+ if (err) {
+ spin_unlock(&e->lock);
+ goto out;
+ }
+
+ list_del(&qe->list);
+ t4_free_mem(qe);
+ if (atomic_dec_and_test(&e->refcnt)) {
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+ spin_unlock(&e->lock);
+ }
+out:
+ return err;
+}
+
+static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
+{
+ struct adapter *adap = pi->adapter;
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+ struct sched_queue_entry *qe = NULL;
+ struct sge_eth_txq *txq;
+ unsigned int qid;
+ int err = 0;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return -ERANGE;
+
+ qe = t4_alloc_mem(sizeof(struct sched_queue_entry));
+ if (!qe)
+ return -ENOMEM;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qid = txq->q.cntxt_id;
+
+ /* Unbind queue from any existing class */
+ err = t4_sched_queue_unbind(pi, p);
+ if (err)
+ goto out;
+
+ /* Bind queue to specified class */
+ memset(qe, 0, sizeof(*qe));
+ qe->cntxt_id = qid;
+ memcpy(&qe->param, p, sizeof(qe->param));
+
+ e = &s->tab[qe->param.class];
+ spin_lock(&e->lock);
+ err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
+ if (err) {
+ t4_free_mem(qe);
+ spin_unlock(&e->lock);
+ goto out;
+ }
+
+ list_add_tail(&qe->list, &e->queue_list);
+ atomic_inc(&e->refcnt);
+ spin_unlock(&e->lock);
+out:
+ return err;
+}
+
+static void t4_sched_class_unbind_all(struct port_info *pi,
+ struct sched_class *e,
+ enum sched_bind_type type)
+{
+ if (!e)
+ return;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ list_for_each_entry(qe, &e->queue_list, list)
+ t4_sched_queue_unbind(pi, &qe->param);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
+ enum sched_bind_type type, bool bind)
+{
+ int err = 0;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;
+
+ if (bind)
+ err = t4_sched_queue_bind(pi, qe);
+ else
+ err = t4_sched_queue_unbind(pi, qe);
+ break;
+ }
+ default:
+ err = -ENOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * cxgb4_sched_class_bind - Bind an entity to a scheduling class
+ * @dev: net_device pointer
+ * @arg: Entity opaque data
+ * @type: Entity type (Queue)
+ *
+ * Binds an entity (queue) to a scheduling class. If the entity
+ * is bound to another class, it will be unbound from the other class
+ * and bound to the class specified in @arg.
+ */
+int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
+ enum sched_bind_type type)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s;
+ int err = 0;
+ u8 class_id;
+
+ if (!can_sched(dev))
+ return -ENOTSUPP;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;
+
+ class_id = qe->class;
+ break;
+ }
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (!valid_class_id(dev, class_id))
+ return -EINVAL;
+
+ if (class_id == SCHED_CLS_NONE)
+ return -ENOTSUPP;
+
+ s = pi->sched_tbl;
+ write_lock(&s->rw_lock);
+ err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
+ write_unlock(&s->rw_lock);
+
+ return err;
+}
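+
+/* Illustrative binding of a port's first Tx queue to a class previously
+ * returned by cxgb4_sched_class_alloc() (a sketch; indices hypothetical):
+ *
+ *	struct ch_sched_queue qe = {
+ *		.queue = 0,
+ *		.class = e->idx,
+ *	};
+ *
+ *	err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+ */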
+
+/**
+ * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
+ * @dev: net_device pointer
+ * @arg: Entity opaque data
+ * @type: Entity type (Queue)
+ *
+ * Unbinds an entity (queue) from a scheduling class.
+ */
+int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
+ enum sched_bind_type type)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s;
+ int err = 0;
+ u8 class_id;
+
+ if (!can_sched(dev))
+ return -ENOTSUPP;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;
+
+ class_id = qe->class;
+ break;
+ }
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (!valid_class_id(dev, class_id))
+ return -EINVAL;
+
+ s = pi->sched_tbl;
+ write_lock(&s->rw_lock);
+ err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
+ write_unlock(&s->rw_lock);
+
+ return err;
+}
+
+/* If @p is NULL, fetch any available unused class */
+static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e, *end;
+ struct sched_class *found = NULL;
+
+ if (!p) {
+ /* Get any available unused class */
+ end = &s->tab[s->sched_size];
+ for (e = &s->tab[0]; e != end; ++e) {
+ if (e->state == SCHED_STATE_UNUSED) {
+ found = e;
+ break;
+ }
+ }
+ } else {
+ /* Look for a class with matching scheduling parameters */
+ struct ch_sched_params info;
+ struct ch_sched_params tp;
+
+ memset(&info, 0, sizeof(info));
+ memset(&tp, 0, sizeof(tp));
+
+ memcpy(&tp, p, sizeof(tp));
+ /* Don't try to match class parameter */
+ tp.u.params.class = SCHED_CLS_NONE;
+
+ end = &s->tab[s->sched_size];
+ for (e = &s->tab[0]; e != end; ++e) {
+ if (e->state == SCHED_STATE_UNUSED)
+ continue;
+
+ memset(&info, 0, sizeof(info));
+ memcpy(&info, &e->info, sizeof(info));
+ /* Don't try to match class parameter */
+ info.u.params.class = SCHED_CLS_NONE;
+
+ if ((info.type == tp.type) &&
+ (!memcmp(&info.u.params, &tp.u.params,
+ sizeof(info.u.params)))) {
+ found = e;
+ break;
+ }
+ }
+ }
+
+ return found;
+}
+
+static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+ u8 class_id;
+ int err;
+
+ if (!p)
+ return NULL;
+
+ class_id = p->u.params.class;
+
+ /* Only accept search for existing class with matching params
+ * or allocation of new class with specified params
+ */
+ if (class_id != SCHED_CLS_NONE)
+ return NULL;
+
+ write_lock(&s->rw_lock);
+ /* See if there's an existing class with the same
+ * requested sched params
+ */
+ e = t4_sched_class_lookup(pi, p);
+ if (!e) {
+ struct ch_sched_params np;
+
+ /* Fetch any available unused class */
+ e = t4_sched_class_lookup(pi, NULL);
+ if (!e)
+ goto out;
+
+ memset(&np, 0, sizeof(np));
+ memcpy(&np, p, sizeof(np));
+ np.u.params.class = e->idx;
+
+ spin_lock(&e->lock);
+ /* New class */
+ err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
+ if (err) {
+ spin_unlock(&e->lock);
+ e = NULL;
+ goto out;
+ }
+ memcpy(&e->info, &np, sizeof(e->info));
+ atomic_set(&e->refcnt, 0);
+ e->state = SCHED_STATE_ACTIVE;
+ spin_unlock(&e->lock);
+ }
+
+out:
+ write_unlock(&s->rw_lock);
+ return e;
+}
+
+/**
+ * cxgb4_sched_class_alloc - allocate a scheduling class
+ * @dev: net_device pointer
+ * @p: parameters of the scheduling class to fetch or create; must be
+ *	non-NULL, with u.params.class set to SCHED_CLS_NONE.
+ *
+ * Returns a pointer to a scheduling class configured with @p. If a class
+ * with matching parameters already exists, that class is returned;
+ * otherwise an unused class is claimed and programmed via the firmware.
+ */
+struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ u8 class_id;
+
+ if (!can_sched(dev))
+ return NULL;
+
+ class_id = p->u.params.class;
+ if (!valid_class_id(dev, class_id))
+ return NULL;
+
+ return t4_sched_class_alloc(pi, p);
+}
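+
+/* Illustrative request for a 1 Gbps class rate limit (a sketch; the
+ * SCHED_CLASS_* values live in cxgb4.h and the numbers are examples):
+ *
+ *	struct ch_sched_params p = { };
+ *	struct sched_class *e;
+ *
+ *	p.type = SCHED_CLASS_TYPE_PACKET;
+ *	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
+ *	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
+ *	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
+ *	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
+ *	p.u.params.channel = pi->tx_chan;
+ *	p.u.params.class = SCHED_CLS_NONE;
+ *	p.u.params.maxrate = 1000000;	in Kbps, <= SCHED_MAX_RATE_KBPS
+ *	e = cxgb4_sched_class_alloc(dev, &p);
+ */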
+
+static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+{
+ t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+}
+
+struct sched_table *t4_init_sched(unsigned int sched_size)
+{
+ struct sched_table *s;
+ unsigned int i;
+
+ s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class));
+ if (!s)
+ return NULL;
+
+ s->sched_size = sched_size;
+ rwlock_init(&s->rw_lock);
+
+ for (i = 0; i < s->sched_size; i++) {
+ memset(&s->tab[i], 0, sizeof(struct sched_class));
+ s->tab[i].idx = i;
+ s->tab[i].state = SCHED_STATE_UNUSED;
+ INIT_LIST_HEAD(&s->tab[i].queue_list);
+ spin_lock_init(&s->tab[i].lock);
+ atomic_set(&s->tab[i].refcnt, 0);
+ }
+ return s;
+}
+
+void t4_cleanup_sched(struct adapter *adap)
+{
+ struct sched_table *s;
+ unsigned int i, j;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = netdev2pinfo(adap->port[i]);
+
+ s = pi->sched_tbl;
+ /* Use a separate index for the classes so the
+ * for_each_port() iterator isn't clobbered.
+ */
+ for (j = 0; j < s->sched_size; j++) {
+ struct sched_class *e;
+
+ write_lock(&s->rw_lock);
+ e = &s->tab[j];
+ if (e->state == SCHED_STATE_ACTIVE)
+ t4_sched_class_free(pi, e);
+ write_unlock(&s->rw_lock);
+ }
+ t4_free_mem(s);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
new file mode 100644
index 000000000000..77b2b3fd9021
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -0,0 +1,110 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_SCHED_H
+#define __CXGB4_SCHED_H
+
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#define SCHED_CLS_NONE 0xff
+
+#define FW_SCHED_CLS_NONE 0xffffffff
+
+/* Max rate that can be set for a scheduling class is 10 Gbps */
+#define SCHED_MAX_RATE_KBPS 10000000U
+
+enum {
+ SCHED_STATE_ACTIVE,
+ SCHED_STATE_UNUSED,
+};
+
+enum sched_fw_ops {
+ SCHED_FW_OP_ADD,
+};
+
+enum sched_bind_type {
+ SCHED_QUEUE,
+};
+
+struct sched_queue_entry {
+ struct list_head list;
+ unsigned int cntxt_id;
+ struct ch_sched_queue param;
+};
+
+struct sched_class {
+ u8 state;
+ u8 idx;
+ struct ch_sched_params info;
+ struct list_head queue_list;
+ spinlock_t lock; /* Per class lock */
+ atomic_t refcnt;
+};
+
+struct sched_table { /* per port scheduling table */
+ u8 sched_size;
+ rwlock_t rw_lock; /* Table lock */
+ struct sched_class tab[0];
+};
+
+static inline bool can_sched(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ return pi->sched_tbl != NULL;
+}
+
+static inline bool valid_class_id(struct net_device *dev, u8 class_id)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ if ((class_id > pi->sched_tbl->sched_size - 1) &&
+ (class_id != SCHED_CLS_NONE))
+ return false;
+
+ return true;
+}
+
+int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
+ enum sched_bind_type type);
+int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
+ enum sched_bind_type type);
+
+struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
+
+struct sched_table *t4_init_sched(unsigned int size);
+void t4_cleanup_sched(struct adapter *adap);
+#endif /* __CXGB4_SCHED_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ad3552df0545..1e74fd6085df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2860,6 +2860,18 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return 0;
}
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid)
+{
+ u32 param, val;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
+ FW_PARAMS_PARAM_YZ_V(eqid));
+ val = cmplqid;
+ return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+}
+
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid)
{
@@ -2928,8 +2940,8 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
q->desc = NULL;
}
-static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
- struct sge_fl *fl)
+void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
{
struct sge *s = &adap->sge;
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
@@ -3014,12 +3026,6 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- /* clean up RDMA and iSCSI Rx queues */
- t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
- t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
-
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 660204bff726..20dec85da63d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -2729,7 +2729,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
out:
vfree(vpd);
- return ret;
+ return ret < 0 ? ret : 0;
}
/**
@@ -8269,3 +8269,73 @@ void t4_idma_monitor(struct adapter *adapter,
t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
}
}
+
+/**
+ * t4_set_vf_mac_acl - Set MAC address for the specified VF
+ * @adapter: The adapter
+ * @vf: one of the VFs instantiated by the specified PF
+ * @naddr: the number of MAC addresses
+ * @addr: the MAC address(es) to be set to the specified VF
+ */
+int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+ unsigned int naddr, u8 *addr)
+{
+ struct fw_acl_mac_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
+ FW_ACL_MAC_CMD_VFN_V(vf));
+
+ /* Note: Do not enable the ACL */
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+ cmd.nmac = naddr;
+
+ switch (adapter->pf) {
+ case 3:
+ memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
+ break;
+ case 2:
+ memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
+ break;
+ case 1:
+ memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
+ break;
+ case 0:
+ memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
+ break;
+ }
+
+ return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
+}
+
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+ int rateunit, int ratemode, int channel, int class,
+ int minrate, int maxrate, int weight, int pktsize)
+{
+ struct fw_sched_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+ cmd.u.params.type = type;
+ cmd.u.params.level = level;
+ cmd.u.params.mode = mode;
+ cmd.u.params.ch = channel;
+ cmd.u.params.cl = class;
+ cmd.u.params.unit = rateunit;
+ cmd.u.params.rate = ratemode;
+ cmd.u.params.min = cpu_to_be32(minrate);
+ cmd.u.params.max = cpu_to_be32(maxrate);
+ cmd.u.params.weight = cpu_to_be16(weight);
+ cmd.u.params.pktsize = cpu_to_be16(pktsize);
+
+ return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+ NULL, 1);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index e0ebe1378cb2..fba3b2ad382d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -61,6 +61,7 @@ enum {
CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_RX_PHYS_ADDR = 0x30,
CPL_CLOSE_CON_RPL = 0x32,
CPL_ISCSI_HDR = 0x33,
CPL_RDMA_CQE = 0x35,
@@ -83,6 +84,10 @@ enum {
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
+ CPL_TX_TLS_PDU = 0x88,
+ CPL_TX_SEC_PDU = 0x8A,
+ CPL_TX_TLS_ACK = 0x8B,
+
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -94,6 +99,8 @@ enum {
CPL_FW4_PLD = 0xC1,
CPL_FW4_ACK = 0xC3,
+ CPL_RX_PHYS_DSGL = 0xD0,
+
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
CPL_TX_PKT_LSO = 0xED,
@@ -1362,6 +1369,15 @@ struct ulptx_idata {
__be32 len;
};
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+#define ULPTX_CMD_S 24
+#define ULPTX_CMD_M 0xFF
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
#define ULPTX_NSGE_S 0
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
@@ -1369,6 +1385,22 @@ struct ulptx_idata {
#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
#define ULPTX_MORE_F ULPTX_MORE_V(1U)
+#define ULP_TXPKT_DEST_S 16
+#define ULP_TXPKT_DEST_M 0x3
+#define ULP_TXPKT_DEST_V(x) ((x) << ULP_TXPKT_DEST_S)
+
+#define ULP_TXPKT_FID_S 4
+#define ULP_TXPKT_FID_M 0x7ff
+#define ULP_TXPKT_FID_V(x) ((x) << ULP_TXPKT_FID_S)
+
+#define ULP_TXPKT_RO_S 3
+#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
+#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+
+#define ULP_TX_SC_MORE_S 23
+#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
+#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
+
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
@@ -1406,4 +1438,409 @@ struct ulp_mem_io {
#define ULP_MEMIO_DATA_LEN_S 0
#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
+#define ULPTX_NSGE_M 0xFFFF
+#define ULPTX_NSGE_G(x) (((x) >> ULPTX_NSGE_S) & ULPTX_NSGE_M)
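+
+/* The _S/_M/_V/_G macro quartet used throughout this file shifts a field
+ * into position and extracts it again; an illustrative (hypothetical) use:
+ *
+ *	word |= ULPTX_NSGE_V(nsge);	place nsge at bits 15:0
+ *	nsge = ULPTX_NSGE_G(word);	recover the field
+ */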
+
+struct ulptx_sc_memrd {
+ __be32 cmd_to_len;
+ __be32 addr;
+};
+
+#define ULP_TXPKT_DATAMODIFY_S 23
+#define ULP_TXPKT_DATAMODIFY_M 0x1
+#define ULP_TXPKT_DATAMODIFY_V(x) ((x) << ULP_TXPKT_DATAMODIFY_S)
+#define ULP_TXPKT_DATAMODIFY_G(x) \
+ (((x) >> ULP_TXPKT_DATAMODIFY_S) & ULP_TXPKT_DATAMODIFY_M)
+#define ULP_TXPKT_DATAMODIFY_F ULP_TXPKT_DATAMODIFY_V(1U)
+
+#define ULP_TXPKT_CHANNELID_S 22
+#define ULP_TXPKT_CHANNELID_M 0x1
+#define ULP_TXPKT_CHANNELID_V(x) ((x) << ULP_TXPKT_CHANNELID_S)
+#define ULP_TXPKT_CHANNELID_G(x) \
+ (((x) >> ULP_TXPKT_CHANNELID_S) & ULP_TXPKT_CHANNELID_M)
+#define ULP_TXPKT_CHANNELID_F ULP_TXPKT_CHANNELID_V(1U)
+
+#define SCMD_SEQ_NO_CTRL_S 29
+#define SCMD_SEQ_NO_CTRL_M 0x3
+#define SCMD_SEQ_NO_CTRL_V(x) ((x) << SCMD_SEQ_NO_CTRL_S)
+#define SCMD_SEQ_NO_CTRL_G(x) \
+ (((x) >> SCMD_SEQ_NO_CTRL_S) & SCMD_SEQ_NO_CTRL_M)
+
+/* StsFieldPrsnt - Status field at the end of the TLS PDU */
+#define SCMD_STATUS_PRESENT_S 28
+#define SCMD_STATUS_PRESENT_M 0x1
+#define SCMD_STATUS_PRESENT_V(x) ((x) << SCMD_STATUS_PRESENT_S)
+#define SCMD_STATUS_PRESENT_G(x) \
+ (((x) >> SCMD_STATUS_PRESENT_S) & SCMD_STATUS_PRESENT_M)
+#define SCMD_STATUS_PRESENT_F SCMD_STATUS_PRESENT_V(1U)
+
+/* ProtoVersion - Protocol Version. 0: 1.2, 1: 1.1, 2: DTLS, 3: Generic,
+ * 4-15: Reserved.
+ */
+#define SCMD_PROTO_VERSION_S 24
+#define SCMD_PROTO_VERSION_M 0xf
+#define SCMD_PROTO_VERSION_V(x) ((x) << SCMD_PROTO_VERSION_S)
+#define SCMD_PROTO_VERSION_G(x) \
+ (((x) >> SCMD_PROTO_VERSION_S) & SCMD_PROTO_VERSION_M)
+
+/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */
+#define SCMD_ENC_DEC_CTRL_S 23
+#define SCMD_ENC_DEC_CTRL_M 0x1
+#define SCMD_ENC_DEC_CTRL_V(x) ((x) << SCMD_ENC_DEC_CTRL_S)
+#define SCMD_ENC_DEC_CTRL_G(x) \
+ (((x) >> SCMD_ENC_DEC_CTRL_S) & SCMD_ENC_DEC_CTRL_M)
+#define SCMD_ENC_DEC_CTRL_F SCMD_ENC_DEC_CTRL_V(1U)
+
+/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */
+#define SCMD_CIPH_AUTH_SEQ_CTRL_S 22
+#define SCMD_CIPH_AUTH_SEQ_CTRL_M 0x1
+#define SCMD_CIPH_AUTH_SEQ_CTRL_V(x) \
+ ((x) << SCMD_CIPH_AUTH_SEQ_CTRL_S)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_G(x) \
+ (((x) >> SCMD_CIPH_AUTH_SEQ_CTRL_S) & SCMD_CIPH_AUTH_SEQ_CTRL_M)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_F SCMD_CIPH_AUTH_SEQ_CTRL_V(1U)
+
+/* CiphMode - Cipher Mode. 0: NOP, 1:AES-CBC, 2:AES-GCM, 3:AES-CTR,
+ * 4:Generic-AES, 5-15: Reserved.
+ */
+#define SCMD_CIPH_MODE_S 18
+#define SCMD_CIPH_MODE_M 0xf
+#define SCMD_CIPH_MODE_V(x) ((x) << SCMD_CIPH_MODE_S)
+#define SCMD_CIPH_MODE_G(x) \
+ (((x) >> SCMD_CIPH_MODE_S) & SCMD_CIPH_MODE_M)
+
+/* AuthMode - Auth Mode. 0: NOP, 1:SHA1, 2:SHA2-224, 3:SHA2-256
+ * 4-15: Reserved
+ */
+#define SCMD_AUTH_MODE_S 14
+#define SCMD_AUTH_MODE_M 0xf
+#define SCMD_AUTH_MODE_V(x) ((x) << SCMD_AUTH_MODE_S)
+#define SCMD_AUTH_MODE_G(x) \
+ (((x) >> SCMD_AUTH_MODE_S) & SCMD_AUTH_MODE_M)
+
+/* HmacCtrl - HMAC Control. 0:NOP, 1:No truncation, 2:Support HMAC Truncation
+ * per RFC 4366, 3:IPSec 96 bits, 4-7:Reserved
+ */
+#define SCMD_HMAC_CTRL_S 11
+#define SCMD_HMAC_CTRL_M 0x7
+#define SCMD_HMAC_CTRL_V(x) ((x) << SCMD_HMAC_CTRL_S)
+#define SCMD_HMAC_CTRL_G(x) \
+ (((x) >> SCMD_HMAC_CTRL_S) & SCMD_HMAC_CTRL_M)
+
+/* IvSize - IV size in units of 2 bytes */
+#define SCMD_IV_SIZE_S 7
+#define SCMD_IV_SIZE_M 0xf
+#define SCMD_IV_SIZE_V(x) ((x) << SCMD_IV_SIZE_S)
+#define SCMD_IV_SIZE_G(x) \
+ (((x) >> SCMD_IV_SIZE_S) & SCMD_IV_SIZE_M)
+
+/* NumIVs - Number of IVs */
+#define SCMD_NUM_IVS_S 0
+#define SCMD_NUM_IVS_M 0x7f
+#define SCMD_NUM_IVS_V(x) ((x) << SCMD_NUM_IVS_S)
+#define SCMD_NUM_IVS_G(x) \
+ (((x) >> SCMD_NUM_IVS_S) & SCMD_NUM_IVS_M)
+
+/* EnbDbgId - If this is enabled, the upper 20 bits (63:44) of SeqNumber
+ * (below) are used as the Cid (connection id for debug status); these
+ * bits are padded to zero when forming the 64 bit
+ * sequence number for TLS.
+ */
+#define SCMD_ENB_DBGID_S 31
+#define SCMD_ENB_DBGID_M 0x1
+#define SCMD_ENB_DBGID_V(x) ((x) << SCMD_ENB_DBGID_S)
+#define SCMD_ENB_DBGID_G(x) \
+ (((x) >> SCMD_ENB_DBGID_S) & SCMD_ENB_DBGID_M)
+
+/* IV generation in SW. */
+#define SCMD_IV_GEN_CTRL_S 30
+#define SCMD_IV_GEN_CTRL_M 0x1
+#define SCMD_IV_GEN_CTRL_V(x) ((x) << SCMD_IV_GEN_CTRL_S)
+#define SCMD_IV_GEN_CTRL_G(x) \
+ (((x) >> SCMD_IV_GEN_CTRL_S) & SCMD_IV_GEN_CTRL_M)
+#define SCMD_IV_GEN_CTRL_F SCMD_IV_GEN_CTRL_V(1U)
+
+/* More frags */
+#define SCMD_MORE_FRAGS_S 20
+#define SCMD_MORE_FRAGS_M 0x1
+#define SCMD_MORE_FRAGS_V(x) ((x) << SCMD_MORE_FRAGS_S)
+#define SCMD_MORE_FRAGS_G(x) (((x) >> SCMD_MORE_FRAGS_S) & SCMD_MORE_FRAGS_M)
+
+/* last frag */
+#define SCMD_LAST_FRAG_S 19
+#define SCMD_LAST_FRAG_M 0x1
+#define SCMD_LAST_FRAG_V(x) ((x) << SCMD_LAST_FRAG_S)
+#define SCMD_LAST_FRAG_G(x) (((x) >> SCMD_LAST_FRAG_S) & SCMD_LAST_FRAG_M)
+
+/* TlsCompPdu */
+#define SCMD_TLS_COMPPDU_S 18
+#define SCMD_TLS_COMPPDU_M 0x1
+#define SCMD_TLS_COMPPDU_V(x) ((x) << SCMD_TLS_COMPPDU_S)
+#define SCMD_TLS_COMPPDU_G(x) (((x) >> SCMD_TLS_COMPPDU_S) & SCMD_TLS_COMPPDU_M)
+
+/* KeyCntxtInline - Key context inline after the scmd OR PayloadOnly */
+#define SCMD_KEY_CTX_INLINE_S 17
+#define SCMD_KEY_CTX_INLINE_M 0x1
+#define SCMD_KEY_CTX_INLINE_V(x) ((x) << SCMD_KEY_CTX_INLINE_S)
+#define SCMD_KEY_CTX_INLINE_G(x) \
+ (((x) >> SCMD_KEY_CTX_INLINE_S) & SCMD_KEY_CTX_INLINE_M)
+#define SCMD_KEY_CTX_INLINE_F SCMD_KEY_CTX_INLINE_V(1U)
+
+/* TLSFragEnable - 0: Host-created TLS PDUs, 1: TLS fragmentation in ASIC */
+#define SCMD_TLS_FRAG_ENABLE_S 16
+#define SCMD_TLS_FRAG_ENABLE_M 0x1
+#define SCMD_TLS_FRAG_ENABLE_V(x) ((x) << SCMD_TLS_FRAG_ENABLE_S)
+#define SCMD_TLS_FRAG_ENABLE_G(x) \
+ (((x) >> SCMD_TLS_FRAG_ENABLE_S) & SCMD_TLS_FRAG_ENABLE_M)
+#define SCMD_TLS_FRAG_ENABLE_F SCMD_TLS_FRAG_ENABLE_V(1U)
+
+/* MacOnly - Only send the MAC and discard the PDU. This is valid for
+ * hash-only modes; in this case TLS_TX will drop the PDU and only
+ * send back the MAC bytes.
+ */
+#define SCMD_MAC_ONLY_S 15
+#define SCMD_MAC_ONLY_M 0x1
+#define SCMD_MAC_ONLY_V(x) ((x) << SCMD_MAC_ONLY_S)
+#define SCMD_MAC_ONLY_G(x) \
+ (((x) >> SCMD_MAC_ONLY_S) & SCMD_MAC_ONLY_M)
+#define SCMD_MAC_ONLY_F SCMD_MAC_ONLY_V(1U)
+
+/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols
+ * which have complex AAD and IV formations, e.g. AES-CCM.
+ */
+#define SCMD_AADIVDROP_S 14
+#define SCMD_AADIVDROP_M 0x1
+#define SCMD_AADIVDROP_V(x) ((x) << SCMD_AADIVDROP_S)
+#define SCMD_AADIVDROP_G(x) \
+ (((x) >> SCMD_AADIVDROP_S) & SCMD_AADIVDROP_M)
+#define SCMD_AADIVDROP_F SCMD_AADIVDROP_V(1U)
+
+/* HdrLength - Length of all headers excluding TLS header
+ * present before start of crypto PDU/payload.
+ */
+#define SCMD_HDR_LEN_S 0
+#define SCMD_HDR_LEN_M 0x3fff
+#define SCMD_HDR_LEN_V(x) ((x) << SCMD_HDR_LEN_S)
+#define SCMD_HDR_LEN_G(x) \
+ (((x) >> SCMD_HDR_LEN_S) & SCMD_HDR_LEN_M)
+
+struct cpl_tx_sec_pdu {
+ __be32 op_ivinsrtofst;
+ __be32 pldlen;
+ __be32 aadstart_cipherstop_hi;
+ __be32 cipherstop_lo_authinsert;
+ __be32 seqno_numivs;
+ __be32 ivgen_hdrlen;
+ __be64 scmd1;
+};
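+
+/* Illustrative composition of op_ivinsrtofst using the field macros
+ * below (values hypothetical):
+ *
+ *	sec_cpl->op_ivinsrtofst = cpu_to_be32(
+ *		CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ *		CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ *		CPL_TX_SEC_PDU_PLACEHOLDER_V(0) |
+ *		CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
+ */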
+
+#define CPL_TX_SEC_PDU_OPCODE_S 24
+#define CPL_TX_SEC_PDU_OPCODE_M 0xff
+#define CPL_TX_SEC_PDU_OPCODE_V(x) ((x) << CPL_TX_SEC_PDU_OPCODE_S)
+#define CPL_TX_SEC_PDU_OPCODE_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_OPCODE_S) & CPL_TX_SEC_PDU_OPCODE_M)
+
+/* RX Channel Id */
+#define CPL_TX_SEC_PDU_RXCHID_S 22
+#define CPL_TX_SEC_PDU_RXCHID_M 0x1
+#define CPL_TX_SEC_PDU_RXCHID_V(x) ((x) << CPL_TX_SEC_PDU_RXCHID_S)
+#define CPL_TX_SEC_PDU_RXCHID_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_RXCHID_S) & CPL_TX_SEC_PDU_RXCHID_M)
+#define CPL_TX_SEC_PDU_RXCHID_F CPL_TX_SEC_PDU_RXCHID_V(1U)
+
+/* Ack Follows */
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_S 21
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_M 0x1
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_V(x) ((x) << CPL_TX_SEC_PDU_ACKFOLLOWS_S)
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_ACKFOLLOWS_S) & CPL_TX_SEC_PDU_ACKFOLLOWS_M)
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_F CPL_TX_SEC_PDU_ACKFOLLOWS_V(1U)
+
+/* Loopback bit in cpl_tx_sec_pdu */
+#define CPL_TX_SEC_PDU_ULPTXLPBK_S 20
+#define CPL_TX_SEC_PDU_ULPTXLPBK_M 0x1
+#define CPL_TX_SEC_PDU_ULPTXLPBK_V(x) ((x) << CPL_TX_SEC_PDU_ULPTXLPBK_S)
+#define CPL_TX_SEC_PDU_ULPTXLPBK_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_ULPTXLPBK_S) & CPL_TX_SEC_PDU_ULPTXLPBK_M)
+#define CPL_TX_SEC_PDU_ULPTXLPBK_F CPL_TX_SEC_PDU_ULPTXLPBK_V(1U)
+
+/* Length of cpl header encapsulated */
+#define CPL_TX_SEC_PDU_CPLLEN_S 16
+#define CPL_TX_SEC_PDU_CPLLEN_M 0xf
+#define CPL_TX_SEC_PDU_CPLLEN_V(x) ((x) << CPL_TX_SEC_PDU_CPLLEN_S)
+#define CPL_TX_SEC_PDU_CPLLEN_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CPLLEN_S) & CPL_TX_SEC_PDU_CPLLEN_M)
+
+/* PlaceHolder */
+#define CPL_TX_SEC_PDU_PLACEHOLDER_S 10
+#define CPL_TX_SEC_PDU_PLACEHOLDER_M 0x1
+#define CPL_TX_SEC_PDU_PLACEHOLDER_V(x) ((x) << CPL_TX_SEC_PDU_PLACEHOLDER_S)
+#define CPL_TX_SEC_PDU_PLACEHOLDER_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_PLACEHOLDER_S) & \
+ CPL_TX_SEC_PDU_PLACEHOLDER_M)
+
+/* IvInsrtOffset: Insertion location for IV */
+#define CPL_TX_SEC_PDU_IVINSRTOFST_S 0
+#define CPL_TX_SEC_PDU_IVINSRTOFST_M 0x3ff
+#define CPL_TX_SEC_PDU_IVINSRTOFST_V(x) ((x) << CPL_TX_SEC_PDU_IVINSRTOFST_S)
+#define CPL_TX_SEC_PDU_IVINSRTOFST_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_IVINSRTOFST_S) & \
+ CPL_TX_SEC_PDU_IVINSRTOFST_M)
+
+/* AadStartOffset: Offset in bytes for AAD start from
+ * the first byte following the pkt headers (0-255 bytes)
+ */
+#define CPL_TX_SEC_PDU_AADSTART_S 24
+#define CPL_TX_SEC_PDU_AADSTART_M 0xff
+#define CPL_TX_SEC_PDU_AADSTART_V(x) ((x) << CPL_TX_SEC_PDU_AADSTART_S)
+#define CPL_TX_SEC_PDU_AADSTART_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AADSTART_S) & \
+ CPL_TX_SEC_PDU_AADSTART_M)
+
+/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following
+ * the pkt headers (0-511 bytes)
+ */
+#define CPL_TX_SEC_PDU_AADSTOP_S 15
+#define CPL_TX_SEC_PDU_AADSTOP_M 0x1ff
+#define CPL_TX_SEC_PDU_AADSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AADSTOP_S)
+#define CPL_TX_SEC_PDU_AADSTOP_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AADSTOP_S) & CPL_TX_SEC_PDU_AADSTOP_M)
+
+/* CipherStartOffset: offset in bytes for encryption/decryption start from the
+ * first byte following the pkt headers (0-1023 bytes)
+ */
+#define CPL_TX_SEC_PDU_CIPHERSTART_S 5
+#define CPL_TX_SEC_PDU_CIPHERSTART_M 0x3ff
+#define CPL_TX_SEC_PDU_CIPHERSTART_V(x) ((x) << CPL_TX_SEC_PDU_CIPHERSTART_S)
+#define CPL_TX_SEC_PDU_CIPHERSTART_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CIPHERSTART_S) & \
+ CPL_TX_SEC_PDU_CIPHERSTART_M)
+
+/* CipherStopOffset: offset in bytes for encryption/decryption end
+ * from end of the payload of this command (0-511 bytes)
+ */
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_S 0
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_M 0x1f
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(x) \
+ ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_HI_S)
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) & \
+ CPL_TX_SEC_PDU_CIPHERSTOP_HI_M)
+
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_S 28
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_M 0xf
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(x) \
+ ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_LO_S)
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) & \
+ CPL_TX_SEC_PDU_CIPHERSTOP_LO_M)
+
+/* AuthStartOffset: offset in bytes for authentication start from
+ * the first byte following the pkt headers (0-1023)
+ */
+#define CPL_TX_SEC_PDU_AUTHSTART_S 18
+#define CPL_TX_SEC_PDU_AUTHSTART_M 0x3ff
+#define CPL_TX_SEC_PDU_AUTHSTART_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTART_S)
+#define CPL_TX_SEC_PDU_AUTHSTART_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AUTHSTART_S) & \
+ CPL_TX_SEC_PDU_AUTHSTART_M)
+
+/* AuthStopOffset: offset in bytes for authentication
+ * end from end of the payload of this command (0-511 Bytes)
+ */
+#define CPL_TX_SEC_PDU_AUTHSTOP_S 9
+#define CPL_TX_SEC_PDU_AUTHSTOP_M 0x1ff
+#define CPL_TX_SEC_PDU_AUTHSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTOP_S)
+#define CPL_TX_SEC_PDU_AUTHSTOP_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AUTHSTOP_S) & \
+ CPL_TX_SEC_PDU_AUTHSTOP_M)
+
+/* AuthInsrtOffset: offset in bytes for authentication insertion
+ * from end of the payload of this command (0-511 bytes)
+ */
+#define CPL_TX_SEC_PDU_AUTHINSERT_S 0
+#define CPL_TX_SEC_PDU_AUTHINSERT_M 0x1ff
+#define CPL_TX_SEC_PDU_AUTHINSERT_V(x) ((x) << CPL_TX_SEC_PDU_AUTHINSERT_S)
+#define CPL_TX_SEC_PDU_AUTHINSERT_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AUTHINSERT_S) & \
+ CPL_TX_SEC_PDU_AUTHINSERT_M)
+
+struct cpl_rx_phys_dsgl {
+ __be32 op_to_tid;
+ __be32 pcirlxorder_to_noofsgentr;
+ struct rss_header rss_hdr_int;
+};
+
+#define CPL_RX_PHYS_DSGL_OPCODE_S 24
+#define CPL_RX_PHYS_DSGL_OPCODE_M 0xff
+#define CPL_RX_PHYS_DSGL_OPCODE_V(x) ((x) << CPL_RX_PHYS_DSGL_OPCODE_S)
+#define CPL_RX_PHYS_DSGL_OPCODE_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_OPCODE_S) & CPL_RX_PHYS_DSGL_OPCODE_M)
+
+#define CPL_RX_PHYS_DSGL_ISRDMA_S 23
+#define CPL_RX_PHYS_DSGL_ISRDMA_M 0x1
+#define CPL_RX_PHYS_DSGL_ISRDMA_V(x) ((x) << CPL_RX_PHYS_DSGL_ISRDMA_S)
+#define CPL_RX_PHYS_DSGL_ISRDMA_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_ISRDMA_S) & CPL_RX_PHYS_DSGL_ISRDMA_M)
+#define CPL_RX_PHYS_DSGL_ISRDMA_F CPL_RX_PHYS_DSGL_ISRDMA_V(1U)
+
+#define CPL_RX_PHYS_DSGL_RSVD1_S 20
+#define CPL_RX_PHYS_DSGL_RSVD1_M 0x7
+#define CPL_RX_PHYS_DSGL_RSVD1_V(x) ((x) << CPL_RX_PHYS_DSGL_RSVD1_S)
+#define CPL_RX_PHYS_DSGL_RSVD1_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_RSVD1_S) & \
+ CPL_RX_PHYS_DSGL_RSVD1_M)
+
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_S 31
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_M 0x1
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_PCIRLXORDER_S)
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCIRLXORDER_S) & \
+ CPL_RX_PHYS_DSGL_PCIRLXORDER_M)
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_F CPL_RX_PHYS_DSGL_PCIRLXORDER_V(1U)
+
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_S 30
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_M 0x1
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_PCINOSNOOP_S)
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCINOSNOOP_S) & \
+ CPL_RX_PHYS_DSGL_PCINOSNOOP_M)
+
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_F CPL_RX_PHYS_DSGL_PCINOSNOOP_V(1U)
+
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_S 29
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_M 0x1
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_PCITPHNTENB_S)
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCITPHNTENB_S) & \
+ CPL_RX_PHYS_DSGL_PCITPHNTENB_M)
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_F CPL_RX_PHYS_DSGL_PCITPHNTENB_V(1U)
+
+#define CPL_RX_PHYS_DSGL_PCITPHNT_S 27
+#define CPL_RX_PHYS_DSGL_PCITPHNT_M 0x3
+#define CPL_RX_PHYS_DSGL_PCITPHNT_V(x) ((x) << CPL_RX_PHYS_DSGL_PCITPHNT_S)
+#define CPL_RX_PHYS_DSGL_PCITPHNT_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCITPHNT_S) & \
+ CPL_RX_PHYS_DSGL_PCITPHNT_M)
+
+#define CPL_RX_PHYS_DSGL_DCAID_S 16
+#define CPL_RX_PHYS_DSGL_DCAID_M 0x7ff
+#define CPL_RX_PHYS_DSGL_DCAID_V(x) ((x) << CPL_RX_PHYS_DSGL_DCAID_S)
+#define CPL_RX_PHYS_DSGL_DCAID_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_DCAID_S) & \
+ CPL_RX_PHYS_DSGL_DCAID_M)
+
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_S 0
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_M 0xffff
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_NOOFSGENTR_S)
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_M)
+
#endif /* __T4_MSG_H */
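
The _S/_M/_V/_G defines above follow the cxgb4 bit-field convention: _S is the
field's shift, _M its width mask, _V() shifts a value into position, and _G()
extracts it from a packed word. A minimal sketch (field values are
illustrative, not taken from the driver) of packing and unpacking the
authentication offsets of a CPL_TX_SEC_PDU word:

    u32 word, auth_start;

    /* Pack three non-overlapping fields into one 32-bit word. */
    word = CPL_TX_SEC_PDU_AUTHSTART_V(16) |
           CPL_TX_SEC_PDU_AUTHSTOP_V(4) |
           CPL_TX_SEC_PDU_AUTHINSERT_V(4);

    /* Recover an individual field from the packed word. */
    auth_start = CPL_TX_SEC_PDU_AUTHSTART_G(word);   /* == 16 */
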
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 30507d44422c..4b58b32105f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2009-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -102,6 +102,7 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_ISCSI_TX_DATA_WR = 0x45,
+ FW_CRYPTO_LOOKASIDE_WR = 0x6d,
FW_LASTC2E_WR = 0x70
};
@@ -680,6 +681,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_SCHED_CMD = 0x24,
FW_DEVLOG_CMD = 0x25,
FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40,
@@ -1060,7 +1062,7 @@ struct fw_caps_config_cmd {
__be16 niccaps;
__be16 ofldcaps;
__be16 rdmacaps;
- __be16 r4;
+ __be16 cryptocaps;
__be16 iscsicaps;
__be16 fcoecaps;
__be32 cfcsum;
@@ -2967,6 +2969,41 @@ struct fw_rss_vi_config_cmd {
#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S)
#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U)
+enum fw_sched_sc {
+ FW_SCHED_SC_PARAMS = 1,
+};
+
+struct fw_sched_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_sched {
+ struct fw_sched_config {
+ __u8 sc;
+ __u8 type;
+ __u8 minmaxen;
+ __u8 r3[5];
+ __u8 nclasses[4];
+ __be32 r4;
+ } config;
+ struct fw_sched_params {
+ __u8 sc;
+ __u8 type;
+ __u8 level;
+ __u8 mode;
+ __u8 unit;
+ __u8 rate;
+ __u8 ch;
+ __u8 cl;
+ __be32 min;
+ __be32 max;
+ __be16 weight;
+ __be16 pktsize;
+ __be16 burstsize;
+ __be16 r4;
+ } params;
+ } u;
+};
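
As a minimal sketch (the channel, class, and rate values are invented for
illustration, and the common FW command header fields op_to_write and
retval_len16 are omitted), a caller could fill the params variant of this
union like so:

    struct fw_sched_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.u.params.sc  = FW_SCHED_SC_PARAMS;
    cmd.u.params.ch  = 0;                       /* scheduler channel */
    cmd.u.params.cl  = 1;                       /* scheduling class */
    cmd.u.params.max = cpu_to_be32(100000);     /* max rate cap, illustrative */
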
+
struct fw_clip_cmd {
__be32 op_to_write;
__be32 alloc_to_len16;
@@ -3255,4 +3292,127 @@ struct fw_devlog_cmd {
#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
(((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+#define MAX_IMM_OFLD_TX_DATA_WR_LEN (0xff + sizeof(struct fw_ofld_tx_data_wr))
+
+struct fw_crypto_lookaside_wr {
+ __be32 op_to_cctx_size;
+ __be32 len16_pkd;
+ __be32 session_id;
+ __be32 rx_chid_to_rx_q_id;
+ __be32 key_addr;
+ __be32 pld_size_hash_size;
+ __be64 cookie;
+};
+
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_S 24
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_OPCODE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_OPCODE_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_S 23
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_M 0x1
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_COMPL_S)
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_COMPL_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_COMPL_M)
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_F FW_CRYPTO_LOOKASIDE_WR_COMPL_V(1U)
+
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S 15
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S)
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S 5
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S)
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S 0
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M 0x1f
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_S 0
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_LEN16_S)
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_LEN16_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_LEN16_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S 29
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S)
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_S 27
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_LCB_S)
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_LCB_S) & FW_CRYPTO_LOOKASIDE_WR_LCB_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_S 25
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_PHASH_S)
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_PHASH_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_PHASH_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_IV_S 23
+#define FW_CRYPTO_LOOKASIDE_WR_IV_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_IV_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_IV_S)
+#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_TX_CH_S)
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_TX_CH_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S 0
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M 0x3ff
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S)
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S 24
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S 17
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M 0x7f
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M)
+
#endif /* _T4FW_INTERFACE_H_ */
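
A rough sketch of composing the first words of this work request with the
macros above; the completion flag, the context size of 2, and the use of
DIV_ROUND_UP for the length in 16-byte units are illustrative assumptions,
not code from the driver:

    struct fw_crypto_lookaside_wr wr;

    memset(&wr, 0, sizeof(wr));
    /* Opcode, completion request, and context size share one word. */
    wr.op_to_cctx_size =
        cpu_to_be32(FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(FW_CRYPTO_LOOKASIDE_WR) |
                    FW_CRYPTO_LOOKASIDE_WR_COMPL_F |
                    FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(2));
    /* Request length in 16-byte units. */
    wr.len16_pkd =
        cpu_to_be32(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(sizeof(wr), 16)));
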
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index e116bb8d1729..100b2cc064a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2378,7 +2378,7 @@ static void size_nports_qsets(struct adapter *adapter)
*/
pmask_nports = hweight32(adapter->params.vfres.pmask);
if (pmask_nports < adapter->params.nports) {
- dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
" virtual interfaces; limited by Port Access Rights"
" mask %#x\n", pmask_nports, adapter->params.nports,
adapter->params.vfres.pmask);
@@ -2777,6 +2777,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
struct adapter *adapter;
struct port_info *pi;
struct net_device *netdev;
+ unsigned int pf;
/*
* Print our driver banner the first time we're called to initialize a
@@ -2903,8 +2904,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Allocate our "adapter ports" and stitch everything together.
*/
pmask = adapter->params.vfres.pmask;
+ pf = t4vf_get_pf_from_vf(adapter);
for_each_port(adapter, pidx) {
int port_id, viid;
+ u8 mac[ETH_ALEN];
+ unsigned int naddr = 1;
/*
* We simplistically allocate our virtual interfaces
@@ -2975,6 +2979,26 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
pidx);
goto err_free_dev;
}
+
+ err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to determine MAC ACL address, "
+ "continuing anyway.. (status %d)\n", err);
+ } else if (naddr && adapter->params.vfres.nvi == 1) {
+ struct sockaddr addr;
+
+ ether_addr_copy(addr.sa_data, mac);
+ err = cxgb4vf_set_mac_addr(netdev, &addr);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to set MAC address %pM\n",
+ mac);
+ goto err_free_dev;
+ }
+ dev_info(&pdev->dev,
+ "Using assigned MAC ACL: %pM\n", mac);
+ }
}
/* See what interrupts we'll be using. If we've been configured to
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index c8fd4f8fe1fa..f3ed9ce99e5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1648,14 +1648,15 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
if (csum_ok && !pkt->err_vec &&
(be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
- if (!pkt->ip_frag)
+ if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else {
+ rxq->stats.rx_cso++;
+ } else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
skb->ip_summed = CHECKSUM_COMPLETE;
+ rxq->stats.rx_cso++;
}
- rxq->stats.rx_cso++;
} else
skb_checksum_none_assert(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 17a2bbcf93f0..b3903fe411aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -354,6 +354,7 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
+unsigned int t4vf_get_pf_from_vf(struct adapter *);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
int t4vf_get_dev_params(struct adapter *);
@@ -388,5 +389,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int);
int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
int t4vf_prep_adapter(struct adapter *);
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+ unsigned int *naddr, u8 *addr);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index b5622b1689e9..e98248f00fef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -640,6 +640,15 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter,
return 0;
}
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
+}
+
/**
* t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
* @adapter: the adapter
@@ -717,7 +726,6 @@ int t4vf_get_sge_params(struct adapter *adapter)
* read.
*/
if (!is_t4(adapter->params.chip)) {
- u32 whoami;
unsigned int pf, s_hps, s_qpp;
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
@@ -741,11 +749,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
* register we just read. Do it once here so other code in
* the driver can just use it.
*/
- whoami = t4_read_reg(adapter,
- T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
- pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
- SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
-
+ pf = t4vf_get_pf_from_vf(adapter);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
sge_params->sge_vf_hps =
@@ -1812,3 +1816,50 @@ int t4vf_prep_adapter(struct adapter *adapter)
return 0;
}
+
+/**
+ * t4vf_get_vf_mac_acl - Get the MAC address to be set on
+ * the VI of this VF.
+ * @adapter: The adapter
+ * @pf: The PF associated with this VF
+ * @naddr: the number of ACL MAC addresses returned in @addr
+ * @addr: Buffer for the returned MAC addresses
+ *
+ * Find the MAC address to be set on the VF's VI. The MAC address is
+ * requested from the host OS via a callback in the PF driver.
+ */
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+ unsigned int *naddr, u8 *addr)
+{
+ struct fw_acl_mac_cmd cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ if (cmd.nmac < *naddr)
+ *naddr = cmd.nmac;
+
+ switch (pf) {
+ case 3:
+ memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
+ break;
+ case 2:
+ memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
+ break;
+ case 1:
+ memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
+ break;
+ case 0:
+ memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index 2362230ef4fe..2534e30a1560 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
-libcxgb-y := libcxgb_ppm.o
+libcxgb-y := libcxgb_ppm.o libcxgb_cm.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
new file mode 100644
index 000000000000..0f0de5b63622
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+
+#include "libcxgb_cm.h"
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
+ int *iptype, __u8 *local_ip, __u8 *peer_ip,
+ __be16 *local_port, __be16 *peer_port)
+{
+ int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+ ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+ T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+ IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+ T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ if (ip->version == 4) {
+ pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+ __func__, ntohl(ip->saddr), ntohl(ip->daddr),
+ ntohs(tcp->source), ntohs(tcp->dest));
+ *iptype = 4;
+ memcpy(peer_ip, &ip->saddr, 4);
+ memcpy(local_ip, &ip->daddr, 4);
+ } else {
+ pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+ __func__, ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+ ntohs(tcp->source), ntohs(tcp->dest));
+ *iptype = 6;
+ memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+ memcpy(local_ip, ip6->daddr.s6_addr, 16);
+ }
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+}
+EXPORT_SYMBOL(cxgb_get_4tuple);
+
+static bool
+cxgb_our_interface(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ struct net_device *egress_dev)
+{
+ int i;
+
+ egress_dev = get_real_dev(egress_dev);
+ for (i = 0; i < lldi->nports; i++)
+ if (lldi->ports[i] == egress_dev)
+ return true;
+ return false;
+}
+
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ __be32 local_ip, __be32 peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct neighbour *n;
+
+ rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+ peer_port, local_port, IPPROTO_TCP,
+ tos, 0);
+ if (IS_ERR(rt))
+ return NULL;
+ n = dst_neigh_lookup(&rt->dst, &peer_ip);
+ if (!n)
+ return NULL;
+ if (!cxgb_our_interface(lldi, get_real_dev, n->dev) &&
+ !(n->dev->flags & IFF_LOOPBACK)) {
+ neigh_release(n);
+ dst_release(&rt->dst);
+ return NULL;
+ }
+ neigh_release(n);
+ return &rt->dst;
+}
+EXPORT_SYMBOL(cxgb_find_route);
+
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ __u8 *local_ip, __u8 *peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos, __u32 sin6_scope_id)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, peer_ip, 16);
+ memcpy(&fl6.saddr, local_ip, 16);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst)
+ goto out;
+ if (!cxgb_our_interface(lldi, get_real_dev,
+ ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ }
+
+out:
+ return dst;
+}
+EXPORT_SYMBOL(cxgb_find_route6);
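
A hypothetical caller (function and variable names invented for illustration)
supplies a get_real_dev callback that maps stacked devices such as VLAN or
bond uppers to the underlying cxgb4 netdev; when no upper devices are in
play, the identity function suffices:

    static struct net_device *my_get_real_dev(struct net_device *dev)
    {
        return dev;                 /* no upper devices in this sketch */
    }

    static int my_resolve_route(struct cxgb4_lld_info *lldi,
                                __be32 lip, __be32 pip,
                                __be16 lport, __be16 pport, u8 tos,
                                struct dst_entry **dstp)
    {
        *dstp = cxgb_find_route(lldi, my_get_real_dev, lip, pip,
                                lport, pport, tos);
        return *dstp ? 0 : -EHOSTUNREACH;
    }
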
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
new file mode 100644
index 000000000000..515b94ff9080
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIBCXGB_CM_H__
+#define __LIBCXGB_CM_H__
+
+#include <net/tcp.h>
+
+#include <cxgb4.h>
+#include <t4_msg.h>
+#include <l2t.h>
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
+ int *, __u8 *, __u8 *, __be16 *, __be16 *);
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *,
+ struct net_device *(*)(struct net_device *),
+ __be32, __be32, __be16, __be16, u8);
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *,
+ struct net_device *(*)(struct net_device *),
+ __u8 *, __u8 *, __be16, __be16, u8, __u32);
+
+/* Returns whether a CPL status conveys negative advice. */
+static inline bool cxgb_is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
+static inline void
+cxgb_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx, int use_ts, int ipv6)
+{
+ unsigned short hdr_size = (ipv6 ?
+ sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct tcphdr) +
+ (use_ts ?
+ round_up(TCPOLEN_TIMESTAMP, 4) : 0);
+ unsigned short data_size = mtu - hdr_size;
+
+ cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
+static inline u32 cxgb_compute_wscale(u32 win)
+{
+ u32 wscale = 0;
+
+ while (wscale < 14 && (65535 << wscale) < win)
+ wscale++;
+ return wscale;
+}
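
The loop above picks the smallest shift (capped at 14, the RFC 1323 maximum)
such that a 16-bit window scaled by it covers the requested window. A worked
example, with an illustrative window size:

    /* 65535 << 3 = 524,280 is too small for a 1,000,000-byte window,
     * but 65535 << 4 = 1,048,560 covers it, so wscale is 4.
     */
    u32 wscale = cxgb_compute_wscale(1000000);   /* == 4 */
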
+
+static inline void
+cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+ struct cpl_tid_release *req;
+
+ req = (struct cpl_tid_release *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+}
+
+static inline void
+cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ void *handle, arp_err_handler_t handler)
+{
+ struct cpl_close_con_req *req;
+
+ req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+ t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ void *handle, arp_err_handler_t handler)
+{
+ struct cpl_abort_req *req;
+
+ req = (struct cpl_abort_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+ t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+ struct cpl_abort_rpl *rpl;
+
+ rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
+ memset(rpl, 0, len);
+
+ INIT_TP_WR(rpl, tid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+}
+
+static inline void
+cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ u32 credit_dack)
+{
+ struct cpl_rx_data_ack *req;
+
+ req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
+ req->credit_dack = cpu_to_be32(credit_dack);
+ set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
+}
+#endif
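
A hypothetical user of these helpers (the function name, rounding, and
allocation flags are illustrative; cxgb4_ofld_send() is the usual transmit
path for offload CPLs): allocate an skb sized for the CPL, let the helper
build it, then hand it to the hardware:

    static int my_release_tid(struct net_device *dev, u32 tid, u16 chan)
    {
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

        if (!skb)
            return -ENOMEM;
        cxgb_mk_tid_release(skb, len, tid, chan);
        return cxgb4_ofld_send(dev, skb);
    }
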
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f0e9e2ef62a0..6620fc861c47 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1966,7 +1966,7 @@ SetMulticastFilter(struct net_device *dev)
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
+ hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
@@ -5043,7 +5043,7 @@ build_setup_frame(struct net_device *dev, int mode)
*(pa + i) = dev->dev_addr[i]; /* Host address */
if (i & 0x01) pa += 2;
}
- *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
+ *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
} else {
for (i=0; i<ETH_ALEN; i++) { /* Host address */
*(pa + (i&1)) = dev->dev_addr[i];
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
index ec756eba397b..1bfdc9b117f6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.h
+++ b/drivers/net/ethernet/dec/tulip/de4x5.h
@@ -860,8 +860,8 @@
#define PCI 0
#define EISA 1
-#define HASH_TABLE_LEN 512 /* Bits */
-#define HASH_BITS 0x01ff /* 9 LS bits */
+#define DE4X5_HASH_TABLE_LEN 512 /* Bits */
+#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */
#define SETUP_FRAME_LEN 192 /* Bytes */
#define IMPERF_PA_OFFSET 156 /* Bytes */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 58c6338a839e..79d80090eac8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -867,7 +867,7 @@ static int netdev_open(struct net_device *dev)
/* Initialize other registers. */
__set_mac_addr(dev);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4555e041ef69..6cfa63a5e9b4 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.0.0.0"
+#define DRV_VER "11.1.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -399,13 +399,13 @@ enum vf_state {
#define BE_FLAGS_PHY_MISCONFIGURED BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
#define BE_FLAGS_OS2BMC BIT(12)
+#define BE_FLAGS_TRY_RECOVERY BIT(13)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
#define MAX_ERR_RECOVERY_RETRY_COUNT 3
#define ERR_DETECTION_DELAY 1000
-#define ERR_RECOVERY_RETRY_DELAY 30000
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
@@ -508,6 +508,70 @@ struct be_wrb_params {
u16 lso_mss; /* MSS for LSO */
};
+struct be_eth_addr {
+ unsigned char mac[ETH_ALEN];
+};
+
+#define BE_SEC 1000 /* in msec */
+#define BE_MIN (60 * BE_SEC) /* in msec */
+#define BE_HOUR (60 * BE_MIN) /* in msec */
+
+#define ERR_RECOVERY_MAX_RETRY_COUNT 3
+#define ERR_RECOVERY_DETECTION_DELAY BE_SEC
+#define ERR_RECOVERY_RETRY_DELAY (30 * BE_SEC)
+
+/* UE-detection-duration in BEx/Skyhawk:
+ * All PFs must wait for this duration after they detect a UE before
+ * reading the SLIPORT_SEMAPHORE register. At the end of this duration,
+ * the firmware guarantees that the SLIPORT_SEMAPHORE register has been
+ * updated to indicate whether the UE is recoverable.
+ */
+#define ERR_RECOVERY_UE_DETECT_DURATION BE_SEC
+
+/* Initial idle time (in msec) that must elapse after driver load
+ * before UE recovery is allowed.
+ */
+#define ERR_IDLE_HR 24
+#define ERR_RECOVERY_IDLE_TIME (ERR_IDLE_HR * BE_HOUR)
+
+/* Time interval (in msec) after which UE recovery can be repeated */
+#define ERR_INTERVAL_HR 72
+#define ERR_RECOVERY_INTERVAL (ERR_INTERVAL_HR * BE_HOUR)
+
+/* BEx/SH UE recovery state machine */
+enum {
+ ERR_RECOVERY_ST_NONE = 0, /* No Recovery */
+ ERR_RECOVERY_ST_DETECT = 1, /* UE detection duration */
+ ERR_RECOVERY_ST_RESET = 2, /* Reset Phase (PF0 only) */
+ ERR_RECOVERY_ST_PRE_POLL = 3, /* Pre-Poll Phase (all PFs) */
+ ERR_RECOVERY_ST_REINIT = 4 /* Re-initialize Phase */
+};
+
+struct be_error_recovery {
+ /* Lancer error recovery variables */
+ u8 recovery_retries;
+
+ /* BEx/Skyhawk error recovery variables */
+ u8 recovery_state;
+ u16 ue_to_reset_time; /* Time after UE before soft-resetting
+ * the chip - PF0 only
+ */
+ u16 ue_to_poll_time; /* Time after UE before restarting
+ * polling of the SLIPORT_SEMAPHORE reg
+ */
+ u16 last_err_code;
+ bool recovery_supported;
+ unsigned long probe_time;
+ unsigned long last_recovery_time;
+
+ /* Common to both Lancer & BEx/SH error recovery */
+ u32 resched_delay;
+ struct delayed_work err_detection_work;
+};
+
+/* Ethtool priv_flags */
+#define BE_DISABLE_TPE_RECOVERY 0x1
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -523,7 +587,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
@@ -556,7 +620,6 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
- struct delayed_work be_err_detection_work;
u8 recovery_retries;
u8 err_flags;
bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
@@ -570,9 +633,15 @@ struct be_adapter {
int if_handle; /* Used to configure filtering */
u32 if_flags; /* Interface filtering flags */
u32 *pmac_id; /* MAC addr handle used by BE card */
+ struct be_eth_addr *uc_list;/* list of uc-addrs programmed (not perm) */
u32 uc_macs; /* Count of secondary UC MAC programmed */
+ struct be_eth_addr *mc_list;/* list of mcast addrs programmed */
+ u32 mc_count;
unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u16 vlans_added;
+ bool update_uc_list;
+ bool update_mc_list;
+ struct mutex rx_filter_lock;/* For protecting vids[] & mc/uc_list[] */
u32 beacon_state; /* for set_phys_id */
@@ -624,6 +693,18 @@ struct be_adapter {
u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
+ u8 dev_mac[ETH_ALEN];
+ u32 priv_flags; /* ethtool get/set_priv_flags() */
+ struct be_error_recovery error_recovery;
+};
+
+/* Used for deferred FW config cmds. Add fields to this struct as required */
+struct be_cmd_work {
+ struct work_struct work;
+ struct be_adapter *adapter;
+ union {
+ __be16 vxlan_port;
+ } info;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -848,6 +929,9 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
+#define be_error_recovering(adapter) \
+ (adapter->flags & BE_FLAGS_TRY_RECOVERY)
+
#define BE_ERROR_EEH 1
#define BE_ERROR_UE BIT(1)
#define BE_ERROR_FW BIT(2)
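
A sketch (not the driver's actual code) of how the recovery worker might
re-arm itself via the common resched_delay field; all the ERR_RECOVERY_*
delay macros above are in msec:

    struct be_error_recovery *err_rec = &adapter->error_recovery;

    err_rec->resched_delay = ERR_RECOVERY_DETECTION_DELAY;
    schedule_delayed_work(&err_rec->err_detection_work,
                          msecs_to_jiffies(err_rec->resched_delay));
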
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2cc11756859f..9cffe48be156 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -92,6 +92,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_COMMON,
BE_PRIV_DEVCFG | BE_PRIV_VHADM
},
+ {
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG
+ }
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -571,7 +576,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 120000 /* 12s timeout */
+#define mcc_timeout 12000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -585,7 +590,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- udelay(100);
+ usleep_range(500, 1000);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -705,7 +710,7 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
return 0;
}
-static u16 be_POST_stage_get(struct be_adapter *adapter)
+u16 be_POST_stage_get(struct be_adapter *adapter)
{
u32 sem;
@@ -863,7 +868,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -874,7 +879,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- spin_unlock_bh(&adapter->mcc_lock);
+ return mutex_unlock(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1044,7 +1049,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1073,7 +1078,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1085,7 +1090,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1110,7 +1115,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1128,7 +1133,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1148,7 +1153,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1411,7 +1416,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1441,7 +1446,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1505,7 +1510,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1522,7 +1527,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1590,7 +1595,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1618,7 +1623,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1634,7 +1639,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1657,7 +1662,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1694,7 +1699,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1733,7 +1738,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1744,7 +1749,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1759,7 +1764,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1808,7 +1813,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60*1024);
@@ -1848,7 +1853,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
err:
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1859,7 +1864,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1882,7 +1887,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1896,7 +1901,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1919,7 +1924,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1946,7 +1951,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1968,7 +1973,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1979,7 +1984,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1996,8 +2001,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
req->if_flags = (value == ON) ? req->if_flags_mask : 0;
if (flags & BE_IF_FLAGS_MULTICAST) {
- struct netdev_hw_addr *ha;
- int i = 0;
+ int i;
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
@@ -2005,14 +2009,15 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
be_if_cap_flags(adapter));
- req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
- netdev_for_each_mc_addr(ha, adapter->netdev)
- memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
+ req->mcast_num = cpu_to_le32(adapter->mc_count);
+ for (i = 0; i < adapter->mc_count; i++)
+ ether_addr_copy(req->mcast_mac[i].byte,
+ adapter->mc_list[i].mac);
}
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2043,7 +2048,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2063,7 +2068,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2082,7 +2087,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2105,7 +2110,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2186,7 +2191,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2211,7 +2216,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2223,7 +2228,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2244,7 +2249,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2255,7 +2260,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2279,7 +2284,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2303,7 +2308,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2325,7 +2330,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data, PAGE_DATA_LEN);
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2342,7 +2347,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2384,7 +2389,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2403,7 +2408,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2457,7 +2462,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2475,7 +2480,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2488,7 +2493,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2522,7 +2527,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2534,7 +2539,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2559,7 +2564,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2570,7 +2575,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2581,7 +2586,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2608,7 +2613,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3192,7 +3197,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3209,7 +3214,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3224,7 +3229,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3247,7 +3252,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3256,7 +3261,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3273,7 +3278,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3299,7 +3304,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3307,7 +3312,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3323,7 +3328,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3357,7 +3362,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3368,7 +3373,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3384,7 +3389,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3399,7 +3404,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3444,7 +3449,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3454,7 +3459,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3474,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3581,7 +3586,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3613,7 +3618,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3625,7 +3630,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3645,7 +3650,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3677,7 +3682,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3741,7 +3746,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3801,7 +3806,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3823,7 +3828,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3859,7 +3864,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3900,7 +3905,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3914,7 +3919,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3961,7 +3966,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4127,6 +4132,10 @@ int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_get_ext_fat_caps *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -4138,7 +4147,7 @@ int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
cmd->size, wrb, cmd);
req->parameter_type = cpu_to_le32(1);
@@ -4156,7 +4165,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4167,12 +4176,12 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
req = cmd->va;
memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
+ OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES,
cmd->size, wrb, cmd);
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4373,7 +4382,7 @@ err:
}
/* This routine returns a list of all the NIC PF_nums in the adapter */
-u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
{
struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
struct be_pcie_res_desc *pcie = NULL;
@@ -4525,7 +4534,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4650,7 +4659,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4667,7 +4676,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4701,7 +4710,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4722,7 +4731,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4816,7 +4825,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4834,7 +4843,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4898,14 +4907,15 @@ err:
return status;
}
-int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
- int link_state, int version, u8 domain)
+static int
+__be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, int version, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4931,7 +4941,7 @@ int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4954,6 +4964,57 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
1, domain);
return status;
}
+
+int be_cmd_set_features(struct be_adapter *adapter)
+{
+ struct be_cmd_resp_set_features *resp;
+ struct be_cmd_req_set_features *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mcc_lock))
+ return -1;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FEATURES,
+ sizeof(*req), wrb, NULL);
+
+ req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
+ req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
+ req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
+
+ status = be_mcc_notify_wait(adapter);
+ if (status)
+ goto err;
+
+ resp = embedded_payload(wrb);
+
+ adapter->error_recovery.ue_to_poll_time =
+ le16_to_cpu(resp->parameter.resp.ue2rp);
+ adapter->error_recovery.ue_to_reset_time =
+ le16_to_cpu(resp->parameter.resp.ue2sr);
+ adapter->error_recovery.recovery_supported = true;
+err:
+ /* Checking "MCC_STATUS_INVALID_LENGTH" for SKH as FW
+ * returns this error in older firmware versions
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_INVALID_LENGTH)
+ dev_info(&adapter->pdev->dev,
+ "Adapter does not support HW error recovery\n");
+
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
@@ -4964,7 +5025,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4987,7 +5048,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
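
The lock conversion running through these hunks is the substantive change: be_mcc_notify_wait() sleeps until the firmware completes the MCC command, and sleeping is illegal under spin_lock_bh(), so adapter->mcc_lock becomes a mutex. A minimal userspace sketch of the resulting pattern, using POSIX threads as a stand-in for the kernel primitives (names and the 1 ms wait are hypothetical; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mcc_lock = PTHREAD_MUTEX_INITIALIZER;

static int issue_mcc_cmd(int opcode)
{
	(void)opcode;
	pthread_mutex_lock(&mcc_lock);	/* mutex_lock(): blocking is fine */
	/* ... build the WRB and ring the MCC doorbell ... */
	usleep(1000);			/* sleeping completion wait: the reason
					 * spin_lock_bh() cannot be used here */
	pthread_mutex_unlock(&mcc_lock);
	return 0;
}

int main(void)
{
	printf("status=%d\n", issue_mcc_cmd(191));	/* 191: SET_FEATURES */
	return 0;
}
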
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0d6be224a787..1bd82bcb3be5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -58,7 +58,8 @@ enum mcc_base_status {
MCC_STATUS_INSUFFICIENT_BUFFER = 4,
MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
MCC_STATUS_NOT_SUPPORTED = 66,
- MCC_STATUS_FEATURE_NOT_SUPPORTED = 68
+ MCC_STATUS_FEATURE_NOT_SUPPORTED = 68,
+ MCC_STATUS_INVALID_LENGTH = 116
};
/* Additional status */
@@ -294,8 +295,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
-#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES 125
-#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES 126
+#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES 125
+#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES 126
#define OPCODE_COMMON_GET_MAC_LIST 147
#define OPCODE_COMMON_SET_MAC_LIST 148
#define OPCODE_COMMON_GET_HSW_CONFIG 152
@@ -308,6 +309,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_COMMON_DELETE_OBJECT 174
+#define OPCODE_COMMON_SET_FEATURES 191
#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
@@ -2315,6 +2317,41 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
+/************** Set Features *******************/
+#define BE_FEATURE_UE_RECOVERY 0x10
+#define BE_UE_RECOVERY_UER_MASK 0x1
+
+struct be_req_ue_recovery {
+ u32 uer;
+ u32 rsvd;
+};
+
+struct be_cmd_req_set_features {
+ struct be_cmd_req_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_req_ue_recovery req;
+ u32 rsvd[2];
+ } parameter;
+};
+
+struct be_resp_ue_recovery {
+ u32 uer;
+ u16 ue2rp;
+ u16 ue2sr;
+};
+
+struct be_cmd_resp_set_features {
+ struct be_cmd_resp_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_resp_ue_recovery resp;
+ u32 rsvd[2];
+ } parameter;
+};
+
/*************** Set logical link ********************/
#define PLINK_ENABLE BIT(0)
#define PLINK_TRACK BIT(8)
@@ -2343,6 +2380,7 @@ struct be_cmd_req_manage_iface_filters {
u32 cap_control_flags;
} __packed;
+u16 be_POST_stage_get(struct be_adapter *adapter);
int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2470,3 +2508,4 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
struct be_resources *vft_res);
+int be_cmd_set_features(struct be_adapter *adapter);
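
The new SET_FEATURES command carries a feature ID plus a union payload, and the response returns the UE-recovery timers that be_cmd_set_features() caches in adapter->error_recovery. A standalone sketch of unpacking that little-endian payload with glibc's <endian.h> helpers (the 30/60 timer values are illustrative, not firmware-defined):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct ue_recovery_resp {
	uint32_t uer;	/* le32 on the wire */
	uint16_t ue2rp;	/* le16: UE-to-poll time */
	uint16_t ue2sr;	/* le16: UE-to-soft-reset time */
};

int main(void)
{
	/* pretend the firmware answered; 30/60 are made-up timer values */
	struct ue_recovery_resp resp = {
		.uer   = htole32(0x1),	/* BE_UE_RECOVERY_UER_MASK */
		.ue2rp = htole16(30),
		.ue2sr = htole16(60),
	};

	printf("recovery %s, ue_to_poll_time=%u ue_to_reset_time=%u\n",
	       (le32toh(resp.uer) & 0x1) ? "supported" : "unsupported",
	       le16toh(resp.ue2rp), le16toh(resp.ue2sr));
	return 0;
}
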
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 50e7be5da50c..0a48a31225e6 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -421,6 +421,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
}
}
+static const char be_priv_flags[][ETH_GSTRING_LEN] = {
+ "disable-tpe-recovery"
+};
+
static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
@@ -454,6 +458,10 @@ static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
data += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(be_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, be_priv_flags[i]);
+ break;
}
}
@@ -468,6 +476,8 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
return ETHTOOL_STATS_NUM +
adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(be_priv_flags);
default:
return -EINVAL;
}
@@ -1360,6 +1370,34 @@ err:
return be_cmd_status(status);
}
+static u32 be_get_priv_flags(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int be_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ bool tpe_old = !!(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
+ bool tpe_new = !!(flags & BE_DISABLE_TPE_RECOVERY);
+
+ if (tpe_old != tpe_new) {
+ if (tpe_new) {
+ adapter->priv_flags |= BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is disabled\n");
+ } else {
+ adapter->priv_flags &= ~BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is enabled\n");
+ }
+ }
+
+ return 0;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1373,6 +1411,8 @@ const struct ethtool_ops be_ethtool_ops = {
.get_ringparam = be_get_ringparam,
.get_pauseparam = be_get_pauseparam,
.set_pauseparam = be_set_pauseparam,
+ .set_priv_flags = be_set_priv_flags,
+ .get_priv_flags = be_get_priv_flags,
.get_strings = be_get_stat_strings,
.set_phys_id = be_set_phys_id,
.set_dump = be_set_dump,
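
With get/set_priv_flags wired into be_ethtool_ops, the new "disable-tpe-recovery" knob is reachable through the standard ethtool private-flags interface. A sketch of querying it from userspace via the ETHTOOL_GPFLAGS ioctl ("eth0" is a placeholder interface name; string index 0 maps to flag bit 0 by ethtool convention):

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (char *)&ev;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("priv flags: 0x%x\n", ev.data);	/* bit 0: disable-tpe-recovery */

	if (fd >= 0)
		close(fd);
	return 0;
}

The ethtool CLI reaches the same hooks via --show-priv-flags and --set-priv-flags.
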
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index c684bb32b487..92942c84d329 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,18 +32,23 @@
#define MPU_EP_CONTROL 0
/********** MPU semaphore: used for SH & BE *************/
+#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31
+#define POST_ERR_RECOVERY_CODE_MASK 0xFFF
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-
+#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
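
The new definitions encode a recoverable TPE error in the POST-stage word: the stage bits must read POST_STAGE_RECOVERABLE_ERR and the low 12 bits carry a non-zero error code, which is how be_err_is_recoverable() later in this series tells recoverable from fatal errors. A standalone sketch of that decode (mask values copied from the header above; the sample word 0xE042 is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POST_STAGE_RECOVERABLE_ERR	0xE000
#define POST_ERR_RECOVERY_CODE_MASK	0xFFF

static bool decode_post_stage(uint32_t val, uint16_t *err_code)
{
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;		/* not a recoverable-error stage */
	*err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	return *err_code != 0;		/* code 0 means nothing to recover */
}

int main(void)
{
	uint16_t code;

	if (decode_post_stage(0xE042, &code))
		printf("recoverable TPE error, code 0x%x\n", code);
	return 0;
}
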
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 874c7539a79d..dcb930a52613 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -41,6 +41,11 @@ static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+/* Per-module error detection/recovery workq shared across all functions.
+ * Each function schedules its own work request on this shared workq.
+ */
+static struct workqueue_struct *be_err_recovery_workq;
+
static const struct pci_device_id be_dev_ids[] = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -53,6 +58,10 @@ static const struct pci_device_id be_dev_ids[] = {
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
+
+/* Workqueue used by all functions for deferring cmd calls to the adapter */
+static struct workqueue_struct *be_wq;
+
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
"CEV",
@@ -260,6 +269,38 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
+static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+{
+ int i;
+
+ /* Check if mac has already been added as part of uc-list */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
+ mac)) {
+ /* mac already added, skip addition */
+ adapter->pmac_id[0] = adapter->pmac_id[i + 1];
+ return 0;
+ }
+ }
+
+ return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+}
+
+static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ int i;
+
+ /* Skip deletion if the programmed MAC is
+ * still in use in the uc-list
+ */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (adapter->pmac_id[i + 1] == pmac_id)
+ return;
+ }
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+}
+
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -267,7 +308,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int status;
u8 mac[ETH_ALEN];
- u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
+ u32 old_pmac_id = adapter->pmac_id[0];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -275,7 +316,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Proceed further only if the user-provided MAC is different
* from the active MAC
*/
- if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
/* if device is not running, copy MAC to netdev->dev_addr */
@@ -288,23 +329,22 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
* FILTMGMT privilege. This failure is OK, only if the PF programmed
* the MAC for the VF.
*/
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+ mutex_lock(&adapter->rx_filter_lock);
+ status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
if (!status) {
- curr_pmac_id = adapter->pmac_id[0];
/* Delete the old programmed MAC. This call may fail if the
* old MAC was already deleted by the PF driver.
*/
if (adapter->pmac_id[0] != old_pmac_id)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- old_pmac_id, 0);
+ be_dev_mac_del(adapter, old_pmac_id);
}
+ mutex_unlock(&adapter->rx_filter_lock);
/* Decide if the new MAC is successfully activated only after
* querying the FW
*/
- status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+ status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
adapter->if_handle, true, 0);
if (status)
goto err;
@@ -317,6 +357,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
goto err;
}
done:
+ ether_addr_copy(adapter->dev_mac, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
@@ -1420,13 +1461,18 @@ static int be_vid_config(struct be_adapter *adapter)
u16 num = 0, i = 0;
int status = 0;
- /* No need to further configure vids if in promiscuous mode */
- if (be_in_all_promisc(adapter))
+ /* No need to change the VLAN state if the I/F is in promiscuous */
+ if (adapter->netdev->flags & IFF_PROMISC)
return 0;
if (adapter->vlans_added > be_max_vlans(adapter))
return be_set_vlan_promisc(adapter);
+ if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ status = be_clear_vlan_promisc(adapter);
+ if (status)
+ return status;
+ }
/* Construct VLAN Table to give to HW */
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
@@ -1439,8 +1485,6 @@ static int be_vid_config(struct be_adapter *adapter)
addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
return be_set_vlan_promisc(adapter);
- } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
- status = be_clear_vlan_promisc(adapter);
}
return status;
}
@@ -1450,46 +1494,45 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
+ mutex_lock(&adapter->rx_filter_lock);
+
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
- return status;
+ goto done;
if (test_bit(vid, adapter->vids))
- return status;
+ goto done;
set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
- if (status) {
- adapter->vlans_added--;
- clear_bit(vid, adapter->vids);
- }
-
+done:
+ mutex_unlock(&adapter->rx_filter_lock);
return status;
}
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ mutex_lock(&adapter->rx_filter_lock);
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
- return 0;
+ goto done;
if (!test_bit(vid, adapter->vids))
- return 0;
+ goto done;
clear_bit(vid, adapter->vids);
adapter->vlans_added--;
- return be_vid_config(adapter);
-}
-
-static void be_clear_all_promisc(struct be_adapter *adapter)
-{
- be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
- adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+ status = be_vid_config(adapter);
+done:
+ mutex_unlock(&adapter->rx_filter_lock);
+ return status;
}
static void be_set_all_promisc(struct be_adapter *adapter)
@@ -1510,75 +1553,226 @@ static void be_set_mc_promisc(struct be_adapter *adapter)
adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}
-static void be_set_mc_list(struct be_adapter *adapter)
+static void be_set_uc_promisc(struct be_adapter *adapter)
{
int status;
- status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+ if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
if (!status)
- adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
- else
+ adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
+}
+
+static void be_clear_uc_promisc(struct be_adapter *adapter)
+{
+ int status;
+
+ if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
+}
+
+/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
+ * We use a single callback function for both sync and unsync. We really don't
+ * add/remove addresses through this callback. But, we use it to detect changes
+ * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
+ */
+static int be_uc_list_update(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->update_uc_list = true;
+ return 0;
+}
+
+static int be_mc_list_update(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->update_mc_list = true;
+ return 0;
+}
+
+static void be_set_mc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netdev_hw_addr *ha;
+ bool mc_promisc = false;
+ int status;
+
+ netif_addr_lock_bh(netdev);
+ __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
+
+ if (netdev->flags & IFF_PROMISC) {
+ adapter->update_mc_list = false;
+ } else if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
+ /* Enable multicast promisc if num configured exceeds
+ * what we support
+ */
+ mc_promisc = true;
+ adapter->update_mc_list = false;
+ } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
+ /* Update mc-list unconditionally if the iface was previously
+ * in mc-promisc mode and now is out of that mode.
+ */
+ adapter->update_mc_list = true;
+ }
+
+ if (adapter->update_mc_list) {
+ int i = 0;
+
+ /* cache the mc-list in adapter */
+ netdev_for_each_mc_addr(ha, netdev) {
+ ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
+ i++;
+ }
+ adapter->mc_count = netdev_mc_count(netdev);
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (mc_promisc) {
be_set_mc_promisc(adapter);
+ } else if (adapter->update_mc_list) {
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
+ else
+ be_set_mc_promisc(adapter);
+
+ adapter->update_mc_list = false;
+ }
+}
+
+static void be_clear_mc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ __dev_mc_unsync(netdev, NULL);
+ be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
+ adapter->mc_count = 0;
+}
+
+static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
+{
+ if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ adapter->dev_mac)) {
+ adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
+ return 0;
+ }
+
+ return be_cmd_pmac_add(adapter,
+ (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ adapter->if_handle,
+ &adapter->pmac_id[uc_idx + 1], 0);
+}
+
+static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ if (pmac_id == adapter->pmac_id[0])
+ return;
+
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
static void be_set_uc_list(struct be_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct netdev_hw_addr *ha;
- int i = 1; /* First slot is claimed by the Primary MAC */
+ bool uc_promisc = false;
+ int curr_uc_macs = 0, i;
- for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ netif_addr_lock_bh(netdev);
+ __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
- if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
- be_set_all_promisc(adapter);
- return;
+ if (netdev->flags & IFF_PROMISC) {
+ adapter->update_uc_list = false;
+ } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
+ uc_promisc = true;
+ adapter->update_uc_list = false;
+ } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
+ /* Update uc-list unconditionally if the iface was previously
+ * in uc-promisc mode and now is out of that mode.
+ */
+ adapter->update_uc_list = true;
}
- netdev_for_each_uc_addr(ha, adapter->netdev) {
- adapter->uc_macs++; /* First slot is for Primary MAC */
- be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
- &adapter->pmac_id[adapter->uc_macs], 0);
+ if (adapter->update_uc_list) {
+ i = 1; /* First slot is claimed by the Primary MAC */
+
+ /* cache the uc-list in adapter array */
+ netdev_for_each_uc_addr(ha, netdev) {
+ ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
+ i++;
+ }
+ curr_uc_macs = netdev_uc_count(netdev);
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (uc_promisc) {
+ be_set_uc_promisc(adapter);
+ } else if (adapter->update_uc_list) {
+ be_clear_uc_promisc(adapter);
+
+ for (i = 0; i < adapter->uc_macs; i++)
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
+ for (i = 0; i < curr_uc_macs; i++)
+ be_uc_mac_add(adapter, i);
+ adapter->uc_macs = curr_uc_macs;
+ adapter->update_uc_list = false;
}
}
static void be_clear_uc_list(struct be_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
int i;
- for (i = 1; i < (adapter->uc_macs + 1); i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ __dev_uc_unsync(netdev, NULL);
+ for (i = 0; i < adapter->uc_macs; i++)
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
adapter->uc_macs = 0;
}
-static void be_set_rx_mode(struct net_device *netdev)
+static void __be_set_rx_mode(struct be_adapter *adapter)
{
- struct be_adapter *adapter = netdev_priv(netdev);
+ struct net_device *netdev = adapter->netdev;
+
+ mutex_lock(&adapter->rx_filter_lock);
if (netdev->flags & IFF_PROMISC) {
- be_set_all_promisc(adapter);
- return;
+ if (!be_in_all_promisc(adapter))
+ be_set_all_promisc(adapter);
+ } else if (be_in_all_promisc(adapter)) {
+ /* We need to re-program the vlan-list or clear
+ * vlan-promisc mode (if needed) when the interface
+ * comes out of promisc mode.
+ */
+ be_vid_config(adapter);
}
- /* Interface was previously in promiscuous mode; disable it */
- if (be_in_all_promisc(adapter)) {
- be_clear_all_promisc(adapter);
- if (adapter->vlans_added)
- be_vid_config(adapter);
- }
+ be_set_uc_list(adapter);
+ be_set_mc_list(adapter);
- /* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > be_max_mc(adapter)) {
- be_set_mc_promisc(adapter);
- return;
- }
+ mutex_unlock(&adapter->rx_filter_lock);
+}
- if (netdev_uc_count(netdev) != adapter->uc_macs)
- be_set_uc_list(adapter);
+static void be_work_set_rx_mode(struct work_struct *work)
+{
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
- be_set_mc_list(adapter);
+ __be_set_rx_mode(cmd_work->adapter);
+ kfree(cmd_work);
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
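
The rx-mode rework above follows one pattern throughout: the __dev_uc_sync()/__dev_mc_sync() callbacks never program the hardware, they only flag that the list changed; the full list is then snapshotted under netif_addr_lock_bh() and programmed afterwards, where sleeping firmware commands are allowed. A condensed standalone sketch of that "flag, snapshot, apply" flow (toy types, hypothetical names):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static unsigned char uc_cache[8][6];
static int uc_count;
static bool update_uc_list;

/* stand-in for be_uc_list_update(): only records that the list changed */
static int uc_sync_cb(const unsigned char *addr)
{
	(void)addr;
	update_uc_list = true;
	return 0;
}

static void set_uc_list(const unsigned char (*list)[6], int n)
{
	/* snapshot phase: netif_addr_lock_bh() is held in the driver */
	for (int i = 0; i < n; i++)
		uc_sync_cb(list[i]);
	if (update_uc_list) {
		memcpy(uc_cache, list, (size_t)n * 6);
		uc_count = n;
	}

	/* apply phase: lock dropped, sleeping FW commands are now legal */
	if (update_uc_list) {
		for (int i = 0; i < uc_count; i++)
			printf("program filter for %02x:..:%02x\n",
			       uc_cache[i][0], uc_cache[i][5]);
		update_uc_list = false;
	}
}

int main(void)
{
	const unsigned char list[2][6] = { {2, 0, 0, 0, 0, 1},
					   {2, 0, 0, 0, 0, 2} };
	set_uc_list(list, 2);
	return 0;
}
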
@@ -1701,7 +1895,8 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1713,6 +1908,9 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
status = be_set_vf_tvt(adapter, vf, vlan);
@@ -3220,9 +3418,7 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (ue_lo || ue_hi) {
- dev_err(dev,
- "Unrecoverable Error detected in the adapter");
- dev_err(dev, "Please reboot server to recover");
+ dev_err(dev, "Error detected in the adapter");
if (skyhawk_chip(adapter))
be_set_error(adapter, BE_ERROR_UE);
@@ -3425,10 +3621,9 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
-
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
be_clear_uc_list(adapter);
+ be_clear_mc_list(adapter);
/* The IFACE flags are enabled in the open path and cleared
* in the close path. When a VF gets detached from the host and
@@ -3462,6 +3657,11 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ /* Before attempting cleanup, ensure all the pending cmds in
+ * be_wq have finished execution
+ */
+ flush_workqueue(be_wq);
+
be_disable_if_filters(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3576,17 +3776,16 @@ static int be_enable_if_filters(struct be_adapter *adapter)
/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
- status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
- adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
+ ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
}
if (adapter->vlans_added)
be_vid_config(adapter);
- be_set_rx_mode(adapter->netdev);
+ __be_set_rx_mode(adapter);
return 0;
}
@@ -3759,8 +3958,13 @@ static void be_cancel_worker(struct be_adapter *adapter)
static void be_cancel_err_detection(struct be_adapter *adapter)
{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
- cancel_delayed_work_sync(&adapter->be_err_detection_work);
+ cancel_delayed_work_sync(&err_rec->err_detection_work);
adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
}
@@ -3860,6 +4064,20 @@ static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
+static void be_if_destroy(struct be_adapter *adapter)
+{
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+
+ kfree(adapter->mc_list);
+ adapter->mc_list = NULL;
+
+ kfree(adapter->uc_list);
+ adapter->uc_list = NULL;
+}
+
static int be_clear(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -3867,6 +4085,8 @@ static int be_clear(struct be_adapter *adapter)
be_cancel_worker(adapter);
+ flush_workqueue(be_wq);
+
if (sriov_enabled(adapter))
be_vf_clear(adapter);
@@ -3884,10 +4104,8 @@ static int be_clear(struct be_adapter *adapter)
}
be_disable_vxlan_offloads(adapter);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ be_if_destroy(adapter);
be_clear_queues(adapter);
@@ -4151,7 +4369,7 @@ static void be_setup_init(struct be_adapter *adapter)
* for distribution between the VFs. This self-imposed limit will determine the
* no: of VFs for which RSS can be enabled.
*/
-void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
struct be_port_resources port_res = {0};
u8 rss_tables_on_port;
@@ -4341,14 +4559,29 @@ static int be_mac_setup(struct be_adapter *adapter)
static void be_schedule_worker(struct be_adapter *adapter)
{
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
+static void be_destroy_err_recovery_workq(void)
+{
+ if (!be_err_recovery_workq)
+ return;
+
+ flush_workqueue(be_err_recovery_workq);
+ destroy_workqueue(be_err_recovery_workq);
+ be_err_recovery_workq = NULL;
+}
+
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
- schedule_delayed_work(&adapter->be_err_detection_work,
- msecs_to_jiffies(delay));
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
+ queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
+ msecs_to_jiffies(delay));
adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
@@ -4393,6 +4626,22 @@ static int be_if_create(struct be_adapter *adapter)
u32 cap_flags = be_if_cap_flags(adapter);
int status;
+ /* alloc required memory for other filtering fields */
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
+ adapter->mc_list = kcalloc(be_max_mc(adapter),
+ sizeof(*adapter->mc_list), GFP_KERNEL);
+ if (!adapter->mc_list)
+ return -ENOMEM;
+
+ adapter->uc_list = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->uc_list), GFP_KERNEL);
+ if (!adapter->uc_list)
+ return -ENOMEM;
+
if (adapter->cfg_num_rx_irqs == 1)
cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
@@ -4401,7 +4650,10 @@ static int be_if_create(struct be_adapter *adapter)
status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
&adapter->if_handle, 0);
- return status;
+ if (status)
+ return status;
+
+ return 0;
}
int be_update_queues(struct be_adapter *adapter)
@@ -4458,10 +4710,15 @@ static inline int fw_major_num(const char *fw_ver)
return fw_major;
}
-/* If any VFs are already enabled don't FLR the PF */
+/* During error recovery, always FLR the PF.
+ * Otherwise, don't FLR the PF if any VFs are already enabled.
+ */
static bool be_reset_required(struct be_adapter *adapter)
{
- return pci_num_vf(adapter->pdev) ? false : true;
+ if (be_error_recovering(adapter))
+ return true;
+ else
+ return pci_num_vf(adapter->pdev) == 0;
}
/* Wait for the FW to be ready and perform the required initialization */
@@ -4473,6 +4730,9 @@ static int be_func_init(struct be_adapter *adapter)
if (status)
return status;
+ /* FW is now ready; clear errors to allow cmds/doorbell */
+ be_clear_error(adapter, BE_CLEAR_ALL);
+
if (be_reset_required(adapter)) {
status = be_cmd_reset_function(adapter);
if (status)
@@ -4480,9 +4740,6 @@ static int be_func_init(struct be_adapter *adapter)
/* Wait for interrupts to quiesce after an FLR */
msleep(100);
-
- /* We can clear all errors when function reset succeeds */
- be_clear_error(adapter, BE_CLEAR_ALL);
}
/* Tell FW we're ready to fire cmds */
@@ -4530,11 +4787,6 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- adapter->pmac_id = kcalloc(be_max_uc(adapter),
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
status = be_msix_enable(adapter);
if (status)
goto err;
@@ -4595,6 +4847,9 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
+ if (be_physfn(adapter) && !lancer_chip(adapter))
+ be_cmd_set_features(adapter);
+
be_schedule_worker(adapter);
adapter->flags |= BE_FLAGS_SETUP_DONE;
return 0;
@@ -4728,6 +4983,23 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
0, 0, nlflags, filter_mask, NULL);
}
+static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
+ void (*func)(struct work_struct *))
+{
+ struct be_cmd_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ dev_err(&adapter->pdev->dev,
+ "be_work memory allocation failed\n");
+ return NULL;
+ }
+
+ INIT_WORK(&work->work, func);
+ work->adapter = adapter;
+ return work;
+}
+
/* VxLAN offload Notes:
*
* The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
@@ -4742,23 +5014,19 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* adds more than one port, disable offloads and don't re-enable them again
* until after all the tunnels are removed.
*/
-static void be_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static void be_work_add_vxlan_port(struct work_struct *work)
{
- struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
+ struct be_adapter *adapter = cmd_work->adapter;
+ struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
- __be16 port = ti->port;
+ __be16 port = cmd_work->info.vxlan_port;
int status;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
- return;
-
if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
adapter->vxlan_port_aliases++;
- return;
+ goto done;
}
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -4770,7 +5038,7 @@ static void be_add_vxlan_port(struct net_device *netdev,
}
if (adapter->vxlan_port_count++ >= 1)
- return;
+ goto done;
status = be_cmd_manage_iface(adapter, adapter->if_handle,
OP_CONVERT_NORMAL_TO_TUNNEL);
@@ -4795,29 +5063,26 @@ static void be_add_vxlan_port(struct net_device *netdev,
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
- return;
+ goto done;
err:
be_disable_vxlan_offloads(adapter);
+done:
+ kfree(cmd_work);
}
-static void be_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static void be_work_del_vxlan_port(struct work_struct *work)
{
- struct be_adapter *adapter = netdev_priv(netdev);
- __be16 port = ti->port;
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
- return;
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
+ struct be_adapter *adapter = cmd_work->adapter;
+ __be16 port = cmd_work->info.vxlan_port;
if (adapter->vxlan_port != port)
goto done;
if (adapter->vxlan_port_aliases) {
adapter->vxlan_port_aliases--;
- return;
+ goto out;
}
be_disable_vxlan_offloads(adapter);
@@ -4827,6 +5092,40 @@ static void be_del_vxlan_port(struct net_device *netdev,
be16_to_cpu(port));
done:
adapter->vxlan_port_count--;
+out:
+ kfree(cmd_work);
+}
+
+static void be_cfg_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti,
+ void (*func)(struct work_struct *))
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_cmd_work *cmd_work;
+
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
+ return;
+
+ cmd_work = be_alloc_work(adapter, func);
+ if (cmd_work) {
+ cmd_work->info.vxlan_port = ti->port;
+ queue_work(be_wq, &cmd_work->work);
+ }
+}
+
+static void be_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
+}
+
+static void be_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
static netdev_features_t be_features_check(struct sk_buff *skb,
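
be_alloc_work() plus be_cfg_vxlan_port() exist because the udp-tunnel ndo hooks can be invoked in atomic context: the handler allocates a small work item with GFP_ATOMIC, stashes the port, and defers the sleeping firmware commands to be_wq. A minimal userspace sketch of that allocate-and-queue shape (the "queue" here just runs the work inline; all names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_work {
	void (*func)(struct cmd_work *);
	uint16_t vxlan_port;		/* kept big-endian in the driver */
};

static void work_add_vxlan_port(struct cmd_work *w)
{
	printf("enable VxLAN offload for UDP port %u\n", w->vxlan_port);
	free(w);			/* the work item frees itself, as above */
}

static void cfg_vxlan_port(uint16_t port, void (*func)(struct cmd_work *))
{
	struct cmd_work *w = calloc(1, sizeof(*w));	/* GFP_ATOMIC in-kernel */

	if (!w)
		return;			/* best-effort, exactly like the driver */
	w->func = func;
	w->vxlan_port = port;
	w->func(w);			/* queue_work(be_wq, &w->work) in-kernel */
}

int main(void)
{
	cfg_vxlan_port(4789, work_add_vxlan_port);
	return 0;
}
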
@@ -4891,6 +5190,16 @@ static int be_get_phys_port_id(struct net_device *dev,
return 0;
}
+static void be_set_rx_mode(struct net_device *dev)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct be_cmd_work *work;
+
+ work = be_alloc_work(adapter, be_work_set_rx_mode);
+ if (work)
+ queue_work(be_wq, &work->work);
+}
+
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -4984,13 +5293,145 @@ static int be_resume(struct be_adapter *adapter)
return 0;
}
+static void be_soft_reset(struct be_adapter *adapter)
+{
+ u32 val;
+
+ dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
+ val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+ val |= SLIPORT_SOFTRESET_SR_MASK;
+ iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+}
+
+static bool be_err_is_recoverable(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ unsigned long initial_idle_time =
+ msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
+ unsigned long recovery_interval =
+ msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
+ u16 ue_err_code;
+ u32 val;
+
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
+ return false;
+ ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
+ if (ue_err_code == 0)
+ return false;
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
+ ue_err_code);
+
+ if (jiffies - err_rec->probe_time <= initial_idle_time) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from driver load\n",
+ jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (err_rec->last_recovery_time &&
+ (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from last recovery\n",
+ jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (ue_err_code == err_rec->last_err_code) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover from a consecutive TPE error\n");
+ return false;
+ }
+
+ err_rec->last_recovery_time = jiffies;
+ err_rec->last_err_code = ue_err_code;
+ return true;
+}
+
+static int be_tpe_recover(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ int status = -EAGAIN;
+ u32 val;
+
+ switch (err_rec->recovery_state) {
+ case ERR_RECOVERY_ST_NONE:
+ err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
+ err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_DETECT:
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) !=
+ POST_STAGE_RECOVERABLE_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable HW error detected: 0x%x\n", val);
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
+
+ /* Only PF0 initiates the Chip Soft Reset. But PF0 must wait UE2SR
+ * milliseconds before checking the final error status in
+ * SLIPORT_SEMAPHORE to determine whether the recovery criteria are
+ * met. If they are, PF0 initiates the Soft Reset.
+ */
+ if (adapter->pf_num == 0) {
+ err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
+ err_rec->resched_delay = err_rec->ue_to_reset_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+ }
+
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_RESET:
+ if (!be_err_is_recoverable(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to meet recovery criteria\n");
+ status = -EIO;
+ err_rec->resched_delay = 0;
+ break;
+ }
+ be_soft_reset(adapter);
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ err_rec->ue_to_reset_time;
+ break;
+
+ case ERR_RECOVERY_ST_PRE_POLL:
+ err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
+ err_rec->resched_delay = 0;
+ status = 0; /* done */
+ break;
+
+ default:
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ return status;
+}
+
static int be_err_recover(struct be_adapter *adapter)
{
int status;
- /* Error recovery is supported only Lancer as of now */
- if (!lancer_chip(adapter))
- return -EIO;
+ if (!lancer_chip(adapter)) {
+ if (!adapter->error_recovery.recovery_supported ||
+ adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
+ return -EIO;
+ status = be_tpe_recover(adapter);
+ if (status)
+ goto err;
+ }
/* Wait for adapter to reach quiescent state before
* destroying queues
@@ -4999,59 +5440,74 @@ static int be_err_recover(struct be_adapter *adapter)
if (status)
goto err;
+ adapter->flags |= BE_FLAGS_TRY_RECOVERY;
+
be_cleanup(adapter);
status = be_resume(adapter);
if (status)
goto err;
- return 0;
+ adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
+
err:
return status;
}
static void be_err_detection_task(struct work_struct *work)
{
+ struct be_error_recovery *err_rec =
+ container_of(work, struct be_error_recovery,
+ err_detection_work.work);
struct be_adapter *adapter =
- container_of(work, struct be_adapter,
- be_err_detection_work.work);
+ container_of(err_rec, struct be_adapter,
+ error_recovery);
+ u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
struct device *dev = &adapter->pdev->dev;
int recovery_status;
- int delay = ERR_DETECTION_DELAY;
be_detect_error(adapter);
-
- if (be_check_error(adapter, BE_ERROR_HW))
- recovery_status = be_err_recover(adapter);
- else
+ if (!be_check_error(adapter, BE_ERROR_HW))
goto reschedule_task;
+ recovery_status = be_err_recover(adapter);
if (!recovery_status) {
- adapter->recovery_retries = 0;
+ err_rec->recovery_retries = 0;
+ err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
dev_info(dev, "Adapter recovery successful\n");
goto reschedule_task;
- } else if (be_virtfn(adapter)) {
+ } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
+ /* BEx/SH recovery state machine */
+ if (adapter->pf_num == 0 &&
+ err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
+ dev_err(&adapter->pdev->dev,
+ "Adapter recovery in progress\n");
+ resched_delay = err_rec->resched_delay;
+ goto reschedule_task;
+ } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
/* For VFs, check every second whether the PF has
* allocated resources.
*/
dev_err(dev, "Re-trying adapter recovery\n");
goto reschedule_task;
- } else if (adapter->recovery_retries++ <
- MAX_ERR_RECOVERY_RETRY_COUNT) {
+ } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
+ ERR_RECOVERY_MAX_RETRY_COUNT) {
/* If another error occurs during recovery, the adapter takes
* 30 sec to come out of the error state; retry recovery after
* that interval.
*/
dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
- delay = ERR_RECOVERY_RETRY_DELAY;
+ resched_delay = ERR_RECOVERY_RETRY_DELAY;
goto reschedule_task;
} else {
dev_err(dev, "Adapter recovery failed\n");
+ dev_err(dev, "Please reboot server to recover\n");
}
return;
+
reschedule_task:
- be_schedule_err_detection(adapter, delay);
+ be_schedule_err_detection(adapter, resched_delay);
}
static void be_log_sfp_info(struct be_adapter *adapter)
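
be_tpe_recover() is a small state machine re-driven by the rescheduled err_detection_work: NONE -> DETECT -> RESET (PF0 only) -> PRE_POLL -> REINIT, with resched_delay carrying the wait before the next step. A standalone sketch of those transitions (the delays are placeholders, not the firmware-provided ue_to_poll/ue_to_reset times):

#include <stdbool.h>
#include <stdio.h>

enum rec_state { ST_NONE, ST_DETECT, ST_RESET, ST_PRE_POLL, ST_REINIT };

static enum rec_state step(enum rec_state s, bool is_pf0, int *delay_ms)
{
	switch (s) {
	case ST_NONE:			/* arm the detection window */
		*delay_ms = 100;
		return ST_DETECT;
	case ST_DETECT:			/* only PF0 performs the soft reset */
		*delay_ms = 1000;
		return is_pf0 ? ST_RESET : ST_PRE_POLL;
	case ST_RESET:			/* chip soft reset, then wait to poll */
		*delay_ms = 2000;
		return ST_PRE_POLL;
	default:			/* PRE_POLL: done, re-init the adapter */
		*delay_ms = 0;
		return ST_REINIT;
	}
}

int main(void)
{
	enum rec_state s = ST_NONE;
	int delay;

	while (s != ST_REINIT) {
		s = step(s, true, &delay);
		printf("state=%d, resched in %d ms\n", s, delay);
	}
	return 0;
}
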
@@ -5116,7 +5572,7 @@ static void be_worker(struct work_struct *work)
reschedule:
adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -5256,14 +5712,18 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- spin_lock_init(&adapter->mcc_lock);
+ mutex_init(&adapter->mcc_lock);
+ mutex_init(&adapter->rx_filter_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
pci_save_state(adapter->pdev);
INIT_DELAYED_WORK(&adapter->work, be_worker);
- INIT_DELAYED_WORK(&adapter->be_err_detection_work,
+
+ adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
+ adapter->error_recovery.resched_delay = 0;
+ INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
be_err_detection_task);
adapter->rx_fc = true;
@@ -5298,6 +5758,9 @@ static void be_remove(struct pci_dev *pdev)
be_clear(adapter);
+ if (!pci_vfs_assigned(adapter->pdev))
+ be_cmd_reset_function(adapter);
+
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
@@ -5454,6 +5917,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_roce_dev_add(adapter);
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
+ adapter->error_recovery.probe_time = jiffies;
/* On Die temperature not supported for VF. */
if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@ -5699,6 +6163,8 @@ static struct pci_driver be_driver = {
static int __init be_init_module(void)
{
+ int status;
+
if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
rx_frag_size != 2048) {
printk(KERN_WARNING DRV_NAME
@@ -5712,12 +6178,33 @@ static int __init be_init_module(void)
pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
}
- return pci_register_driver(&be_driver);
+ be_wq = create_singlethread_workqueue("be_wq");
+ if (!be_wq) {
+ pr_warn(DRV_NAME "workqueue creation failed\n");
+ return -1;
+ }
+
+ be_err_recovery_workq =
+ create_singlethread_workqueue("be_err_recover");
+ if (!be_err_recovery_workq)
+ pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
+
+ status = pci_register_driver(&be_driver);
+ if (status) {
+ destroy_workqueue(be_wq);
+ be_destroy_err_recovery_workq();
+ }
+ return status;
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
pci_unregister_driver(&be_driver);
+
+ be_destroy_err_recovery_workq();
+
+ if (be_wq)
+ destroy_workqueue(be_wq);
}
module_exit(be_exit_module);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 36361f8bf894..262587240c86 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -60,6 +60,8 @@ struct ftgmac100 {
struct ftgmac100_descs *descs;
dma_addr_t descs_dma_addr;
+ struct page *rx_pages[RX_QUEUE_ENTRIES];
+
unsigned int rx_pointer;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
@@ -77,6 +79,9 @@ struct ftgmac100 {
int int_mask_all;
bool use_ncsi;
bool enabled;
+
+ u32 rxdes0_edorr_mask;
+ u32 txdes0_edotr_mask;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
@@ -257,10 +262,11 @@ static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
}
-static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
/* clear status bits */
- rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
@@ -298,9 +304,10 @@ static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
}
-static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
@@ -341,18 +348,27 @@ static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
}
+static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
+{
+ return &priv->rx_pages[rxdes - priv->descs->rxdes];
+}
+
/*
* rxdes2 is not used by hardware, but it is only 32 bits wide and so
* cannot hold a pointer on 64-bit hosts; the page is instead tracked
* in the driver's rx_pages[] array, indexed by descriptor position.
*/
-static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes,
+ struct page *page)
{
- rxdes->rxdes2 = (unsigned int)page;
+ *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
}
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- return (struct page *)rxdes->rxdes2;
+ return *ftgmac100_rxdes_page_slot(priv, rxdes);
}
/******************************************************************************
@@ -382,7 +398,7 @@ ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
if (ftgmac100_rxdes_first_segment(rxdes))
return rxdes;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
}
@@ -453,7 +469,7 @@ static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
} while (!done && ftgmac100_rxdes_packet_ready(rxdes));
@@ -501,7 +517,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
do {
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
unsigned int size;
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
@@ -545,10 +561,11 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
/******************************************************************************
* internal functions (transmit descriptor)
*****************************************************************************/
-static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
/* clear all except end of ring bit */
- txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
txdes->txdes1 = 0;
txdes->txdes2 = 0;
txdes->txdes3 = 0;
@@ -569,9 +586,10 @@ static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}
-static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
@@ -690,7 +708,7 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
dev_kfree_skb(skb);
- ftgmac100_txdes_reset(txdes);
+ ftgmac100_txdes_reset(priv, txdes);
ftgmac100_tx_clean_pointer_advance(priv);
@@ -779,9 +797,9 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
return -ENOMEM;
}
- ftgmac100_rxdes_set_page(rxdes, page);
+ ftgmac100_rxdes_set_page(priv, rxdes, page);
ftgmac100_rxdes_set_dma_addr(rxdes, map);
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
return 0;
}
@@ -791,7 +809,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
if (!page)
@@ -828,7 +846,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
return -ENOMEM;
/* initialize RX ring */
- ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+ ftgmac100_rxdes_set_end_of_ring(priv,
+ &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
@@ -838,7 +857,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
}
/* initialize TX ring */
- ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ ftgmac100_txdes_set_end_of_ring(priv,
+ &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
err:
@@ -1055,14 +1075,12 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
}
if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
- FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG)) {
+ FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
if (net_ratelimit())
- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
- status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
- status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+ status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
if (status & FTGMAC100_INT_NO_RXBUF) {
/* RX buffer unavailable */
@@ -1092,6 +1110,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int status;
int err;
err = ftgmac100_alloc_buffers(priv);
@@ -1117,6 +1136,11 @@ static int ftgmac100_open(struct net_device *netdev)
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+
+ /* Clear stale interrupts */
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
if (netdev->phydev)
phy_start(netdev->phydev);
else if (priv->use_ncsi)
@@ -1166,6 +1190,8 @@ static int ftgmac100_stop(struct net_device *netdev)
napi_disable(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
+ else if (priv->use_ncsi)
+ ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1226,12 +1252,21 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
int i, err = 0;
+ u32 reg;
/* initialize mdio bus */
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus)
return -EIO;
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ /* This driver supports the old MDIO interface */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
+ reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
+ }
+
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1355,9 +1390,18 @@ static int ftgmac100_probe(struct platform_device *pdev)
FTGMAC100_INT_XPKT_ETH |
FTGMAC100_INT_XPKT_LOST |
FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG |
FTGMAC100_INT_RPKT_BUF |
FTGMAC100_INT_NO_RXBUF);
+
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ priv->rxdes0_edorr_mask = BIT(30);
+ priv->txdes0_edotr_mask = BIT(30);
+ } else {
+ priv->rxdes0_edorr_mask = BIT(15);
+ priv->txdes0_edotr_mask = BIT(15);
+ }
+
if (pdev->dev.of_node &&
of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
@@ -1367,7 +1411,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
- priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
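
The ftgmac100 changes above fix two portability problems at once: rxdes2 is a 32-bit descriptor word and cannot hold a struct page pointer on 64-bit hosts, so pages move into the driver-private rx_pages[] array indexed by descriptor position; and the end-of-ring bits differ per SoC (BIT(30) on Aspeed AST2400/AST2500, BIT(15) otherwise), so they become per-device masks. A standalone sketch of the slot lookup by pointer arithmetic (toy types; the ring size is illustrative):

#include <stdio.h>

#define RX_QUEUE_ENTRIES 256

struct rxdes { unsigned int rxdes0, rxdes1, rxdes2, rxdes3; };

static struct rxdes ring[RX_QUEUE_ENTRIES];
static void *rx_pages[RX_QUEUE_ENTRIES];	/* full-width pointer storage */

static void **page_slot(struct rxdes *d)
{
	return &rx_pages[d - ring];		/* pointer difference = index */
}

int main(void)
{
	int dummy;

	*page_slot(&ring[7]) = &dummy;
	printf("slot 7 holds %p\n", rx_pages[7]);
	return 0;
}
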
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 13408d448b05..a7ce0ac8858a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -134,6 +134,11 @@
#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31)
/*
+ * Feature Register
+ */
+#define FTGMAC100_REVR_NEW_MDIO_INTERFACE BIT(31)
+
+/*
* Receive buffer size register
*/
#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff)
@@ -152,6 +157,7 @@
#define FTGMAC100_MACCR_FULLDUP (1 << 8)
#define FTGMAC100_MACCR_GIGA_MODE (1 << 9)
#define FTGMAC100_MACCR_CRC_APD (1 << 10)
+#define FTGMAC100_MACCR_PHY_LINK_LEVEL (1 << 11)
#define FTGMAC100_MACCR_RX_RUNT (1 << 12)
#define FTGMAC100_MACCR_JUMBO_LF (1 << 13)
#define FTGMAC100_MACCR_RX_ALL (1 << 14)
@@ -189,7 +195,6 @@ struct ftgmac100_txdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
-#define FTGMAC100_TXDES0_EDOTR (1 << 15)
#define FTGMAC100_TXDES0_CRC_ERR (1 << 19)
#define FTGMAC100_TXDES0_LTS (1 << 28)
#define FTGMAC100_TXDES0_FTS (1 << 29)
@@ -215,7 +220,6 @@ struct ftgmac100_rxdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_RXDES0_VDBC 0x3fff
-#define FTGMAC100_RXDES0_EDORR (1 << 15)
#define FTGMAC100_RXDES0_MULTICAST (1 << 16)
#define FTGMAC100_RXDES0_BROADCAST (1 << 17)
#define FTGMAC100_RXDES0_RX_ERR (1 << 18)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 692ee248e486..48a033e64423 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -913,13 +913,11 @@ fec_restart(struct net_device *ndev)
* enet-mac reset will reset mac address registers too,
* so we need to reconfigure them.
*/
- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
- }
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel((__force u32)cpu_to_be32(temp_mac[0]),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((__force u32)cpu_to_be32(temp_mac[1]),
+ fep->hwp + FEC_ADDR_HIGH);
/* Clear any outstanding interrupt. */
writel(0xffffffff, fep->hwp + FEC_IEVENT);
@@ -2896,7 +2894,7 @@ fec_enet_close(struct net_device *ndev)
* this kind of feature?).
*/
-#define HASH_BITS 6 /* #bits in hash */
+#define FEC_HASH_BITS 6 /* #bits in hash */
#define CRC32_POLY 0xEDB88320
static void set_multicast_list(struct net_device *ndev)
@@ -2944,10 +2942,10 @@ static void set_multicast_list(struct net_device *ndev)
}
}
- /* only upper 6 bits (HASH_BITS) are used
+ /* only upper 6 bits (FEC_HASH_BITS) are used
* which point to a specific bit in the hash registers
*/
- hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+ hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
if (hash > 31) {
tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
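
The HASH_BITS -> FEC_HASH_BITS rename leaves the algorithm untouched: the filter index is the top six bits of a reflected CRC-32 (polynomial 0xEDB88320) over the six MAC bytes, selecting one bit across the two 32-bit GRP_HASH registers. A standalone sketch of that computation (the sample MAC is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY	0xEDB88320
#define FEC_HASH_BITS	6

static unsigned int fec_mc_hash(const uint8_t *mac)
{
	uint32_t crc = 0xFFFFFFFF;

	for (int i = 0; i < 6; i++) {
		crc ^= mac[i];
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY : 0);
	}
	return (crc >> (32 - FEC_HASH_BITS)) & 0x3f;	/* 0..63 */
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int hash = fec_mc_hash(mac);

	printf("hash=%u -> GRP_HASH_%s bit %u\n", hash,
	       hash > 31 ? "HIGH" : "LOW", hash & 31);
	return 0;
}
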
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 8ddeedbcef9c..ddf0260176c9 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -192,7 +192,7 @@ struct fman_mac_params {
/* A handle to the FM object this port related to */
void *fm;
/* MDIO exceptions interrupt source - not valid for all
- * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+ * MACs; MUST be set to 0 for MACs that don't have
* mdio-irq, or for polling
*/
void *dev_id; /* device cookie used by the exception cbs */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 61fd486c50bb..dc120c148d97 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -60,6 +60,9 @@ module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
"Freescale bitmapped debugging message enable value");
+#define RX_RING_SIZE 32
+#define TX_RING_SIZE 64
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
@@ -79,8 +82,8 @@ static void skb_align(struct sk_buff *skb, int align)
skb_reserve(skb, align - off);
}
-/* NAPI receive function */
-static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
+/* NAPI function */
+static int fs_enet_napi(struct napi_struct *napi, int budget)
{
struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
struct net_device *dev = fep->ndev;
@@ -90,9 +93,102 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
int received = 0;
u16 pkt_len, sc;
int curidx;
+ int dirtyidx, do_wake, do_restart;
+ int tx_left = TX_RING_SIZE;
- if (budget <= 0)
- return received;
+ spin_lock(&fep->tx_lock);
+ bdp = fep->dirty_tx;
+
+	/* clear status bits for napi */
+ (*fep->ops->napi_clear_event)(dev);
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ fep->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* unmap */
+ if (fep->mapped_as_page[dirtyidx])
+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+ else
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ if (skb) {
+ dev_kfree_skb(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+ }
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (++fep->tx_free == MAX_SKB_FRAGS)
+ do_wake = 1;
+ tx_left--;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
/*
* First, grab all of the stats for the incoming packet.
@@ -100,10 +196,8 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
*/
bdp = fep->cur_rx;
- /* clear RX status bits for napi*/
- (*fep->ops->napi_clear_rx_event)(dev);
-
- while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
+ received < budget) {
curidx = bdp - fep->rx_bd_base;
/*
@@ -132,21 +226,10 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
if (sc & BD_ENET_RX_OV)
fep->stats.rx_crc_errors++;
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- skbn = skb;
-
+ skbn = fep->rx_skbuff[curidx];
} else {
skb = fep->rx_skbuff[curidx];
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
/*
* Process the incoming frame.
*/
@@ -162,12 +245,30 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skb_copy_from_linear_data(skb,
skbn->data, pkt_len);
swap(skb, skbn);
+ dma_sync_single_for_cpu(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skbn)
+ if (skbn) {
+ dma_addr_t dma;
+
skb_align(skbn, ENET_RX_ALIGN);
+
+ dma_unmap_single(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ dma = dma_map_single(fep->dev,
+ skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+ CBDW_BUFADDR(bdp, dma);
+ }
}
if (skbn != NULL) {
@@ -182,9 +283,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
}
fep->rx_skbuff[curidx] = skbn;
- CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
CBDW_DATLEN(bdp, 0);
CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
@@ -197,134 +295,19 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
bdp = fep->rx_bd_base;
(*fep->ops->rx_bd_done)(dev);
-
- if (received >= budget)
- break;
}
fep->cur_rx = bdp;
- if (received < budget) {
+ if (received < budget && tx_left) {
/* done */
napi_complete(napi);
- (*fep->ops->napi_enable_rx)(dev);
- }
- return received;
-}
-
-static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
-{
- struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
- napi_tx);
- struct net_device *dev = fep->ndev;
- cbd_t __iomem *bdp;
- struct sk_buff *skb;
- int dirtyidx, do_wake, do_restart;
- u16 sc;
- int has_tx_work = 0;
-
- spin_lock(&fep->tx_lock);
- bdp = fep->dirty_tx;
-
- /* clear TX status bits for napi*/
- (*fep->ops->napi_clear_tx_event)(dev);
-
- do_wake = do_restart = 0;
- while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
- dirtyidx = bdp - fep->tx_bd_base;
-
- if (fep->tx_free == fep->tx_ring)
- break;
-
- skb = fep->tx_skbuff[dirtyidx];
-
- /*
- * Check for errors.
- */
- if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
- BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-
- if (sc & BD_ENET_TX_HB) /* No heartbeat */
- fep->stats.tx_heartbeat_errors++;
- if (sc & BD_ENET_TX_LC) /* Late collision */
- fep->stats.tx_window_errors++;
- if (sc & BD_ENET_TX_RL) /* Retrans limit */
- fep->stats.tx_aborted_errors++;
- if (sc & BD_ENET_TX_UN) /* Underrun */
- fep->stats.tx_fifo_errors++;
- if (sc & BD_ENET_TX_CSL) /* Carrier lost */
- fep->stats.tx_carrier_errors++;
-
- if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
- fep->stats.tx_errors++;
- do_restart = 1;
- }
- } else
- fep->stats.tx_packets++;
-
- if (sc & BD_ENET_TX_READY) {
- dev_warn(fep->dev,
- "HEY! Enet xmit interrupt and TX_READY.\n");
- }
-
- /*
- * Deferred means some collisions occurred during transmit,
- * but we eventually sent the packet OK.
- */
- if (sc & BD_ENET_TX_DEF)
- fep->stats.collisions++;
-
- /* unmap */
- if (fep->mapped_as_page[dirtyidx])
- dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
- else
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-
- /*
- * Free the sk buffer associated with this last transmit.
- */
- if (skb) {
- dev_kfree_skb(skb);
- fep->tx_skbuff[dirtyidx] = NULL;
- }
-
- /*
- * Update pointer to next buffer descriptor to be transmitted.
- */
- if ((sc & BD_ENET_TX_WRAP) == 0)
- bdp++;
- else
- bdp = fep->tx_bd_base;
-
- /*
- * Since we have freed up a buffer, the ring is no longer
- * full.
- */
- if (++fep->tx_free >= MAX_SKB_FRAGS)
- do_wake = 1;
- has_tx_work = 1;
- }
-
- fep->dirty_tx = bdp;
-
- if (do_restart)
- (*fep->ops->tx_restart)(dev);
+ (*fep->ops->napi_enable)(dev);
- if (!has_tx_work) {
- napi_complete(napi);
- (*fep->ops->napi_enable_tx)(dev);
+ return received;
}
- spin_unlock(&fep->tx_lock);
-
- if (do_wake)
- netif_wake_queue(dev);
-
- if (has_tx_work)
- return budget;
- return 0;
+ return budget;
}
/*
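
The rework above merges the separate RX and TX NAPI instances into a single poll function. Reduced to a sketch with hypothetical helper names, the resulting shape is the usual combined pattern: reap TX completions (bounded by the ring size via tx_left), receive up to the budget, and re-enable device interrupts only when both directions are idle:

	static int combined_poll(struct napi_struct *napi, int budget)
	{
		bool tx_idle = reap_tx_completions(napi);	/* hypothetical */
		int received = receive_frames(napi, budget);	/* hypothetical */

		if (received < budget && tx_idle) {
			napi_complete(napi);
			reenable_device_interrupts(napi);	/* ops->napi_enable */
			return received;
		}
		return budget;	/* stay scheduled */
	}
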
@@ -350,18 +333,18 @@ fs_enet_interrupt(int irq, void *dev_id)
nr++;
int_clr_events = int_events;
- int_clr_events &= ~fep->ev_napi_rx;
+ int_clr_events &= ~fep->ev_napi;
(*fep->ops->clear_int_events)(dev, int_clr_events);
if (int_events & fep->ev_err)
(*fep->ops->ev_error)(dev, int_events);
- if (int_events & fep->ev_rx) {
+ if (int_events & fep->ev) {
napi_ok = napi_schedule_prep(&fep->napi);
- (*fep->ops->napi_disable_rx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+ (*fep->ops->napi_disable)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi);
/* NOTE: it is possible for FCCs in NAPI mode */
/* to submit a spurious interrupt while in poll */
@@ -369,17 +352,6 @@ fs_enet_interrupt(int irq, void *dev_id)
__napi_schedule(&fep->napi);
}
- if (int_events & fep->ev_tx) {
- napi_ok = napi_schedule_prep(&fep->napi_tx);
-
- (*fep->ops->napi_disable_tx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
-
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
- if (napi_ok)
- __napi_schedule(&fep->napi_tx);
- }
}
handled = nr > 0;
@@ -659,7 +631,8 @@ static void fs_timeout(struct net_device *dev)
}
phy_start(dev->phydev);
- wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ wake = fep->tx_free >= MAX_SKB_FRAGS &&
+ !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
if (wake)
@@ -751,11 +724,10 @@ static int fs_enet_open(struct net_device *dev)
int err;
/* to initialize the fep->cur_rx,... */
- /* not doing this, will cause a crash in fs_enet_rx_napi */
+ /* not doing this, will cause a crash in fs_enet_napi */
fs_init_bds(fep->ndev);
napi_enable(&fep->napi);
- napi_enable(&fep->napi_tx);
/* Install our interrupt handler. */
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
@@ -763,7 +735,6 @@ static int fs_enet_open(struct net_device *dev)
if (r != 0) {
dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
return -EINVAL;
}
@@ -771,7 +742,6 @@ static int fs_enet_open(struct net_device *dev)
if (err) {
free_irq(fep->interrupt, dev);
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
return err;
}
phy_start(dev->phydev);
@@ -789,7 +759,6 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -861,6 +830,44 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
fep->msg_enable = value;
}
+static int fs_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fpi->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fs_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fpi->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
@@ -872,6 +879,8 @@ static const struct ethtool_ops fs_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_tunable = fs_get_tunable,
+ .set_tunable = fs_set_tunable,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
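
The tunable hooks added above expose fpi->rx_copybreak, the cut-off below which a received frame is copied into a small fresh skb (recycling the DMA buffer) rather than swapping buffers, as seen in the poll function earlier. With a recent ethtool this maps onto the standard tunable, e.g. "ethtool --set-tunable eth0 rx-copybreak 200". Roughly, what it gates:

	/* sketch of the RX-path decision controlled by the tunable */
	if (pkt_len <= fpi->rx_copybreak) {
		/* memcpy the short frame into a new skb; keep the mapping */
	} else {
		/* map a replacement buffer and hand the full skb up */
	}
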
@@ -939,8 +948,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->cp_command = *data;
}
- fpi->rx_ring = 32;
- fpi->tx_ring = 64;
+ fpi->rx_ring = RX_RING_SIZE;
+ fpi->tx_ring = TX_RING_SIZE;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -1024,8 +1033,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
- netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
- netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
+ netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index e29f54a35210..fee24c822fad 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -81,12 +81,9 @@ struct fs_ops {
void (*adjust_link)(struct net_device *dev);
void (*restart)(struct net_device *dev);
void (*stop)(struct net_device *dev);
- void (*napi_clear_rx_event)(struct net_device *dev);
- void (*napi_enable_rx)(struct net_device *dev);
- void (*napi_disable_rx)(struct net_device *dev);
- void (*napi_clear_tx_event)(struct net_device *dev);
- void (*napi_enable_tx)(struct net_device *dev);
- void (*napi_disable_tx)(struct net_device *dev);
+ void (*napi_clear_event)(struct net_device *dev);
+ void (*napi_enable)(struct net_device *dev);
+ void (*napi_disable)(struct net_device *dev);
void (*rx_bd_done)(struct net_device *dev);
void (*tx_kickstart)(struct net_device *dev);
u32 (*get_int_events)(struct net_device *dev);
@@ -122,7 +119,6 @@ struct phy_info {
struct fs_enet_private {
struct napi_struct napi;
- struct napi_struct napi_tx;
struct device *dev; /* pointer back to the device (must be initialized first) */
struct net_device *ndev;
spinlock_t lock; /* during all ops except TX pckt processing */
@@ -152,10 +148,8 @@ struct fs_enet_private {
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
- u32 ev_napi_rx; /* mask of NAPI rx events */
- u32 ev_napi_tx; /* mask of NAPI rx events */
- u32 ev_rx; /* rx event mask */
- u32 ev_tx; /* tx event mask */
+ u32 ev_napi; /* mask of NAPI events */
+ u32 ev; /* event mask */
u32 ev_err; /* error event mask */
u16 bd_rx_empty; /* mask of BD rx empty */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index d71761a34022..120c758f5d01 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
int ret = -EINVAL;
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
goto out;
fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
@@ -124,10 +124,8 @@ out:
return ret;
}
-#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
-#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
-#define FCC_RX_EVENT (FCC_ENET_RXF)
-#define FCC_TX_EVENT (FCC_ENET_TXB)
+#define FCC_NAPI_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
+#define FCC_EVENT (FCC_ENET_RXF | FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
static int setup_data(struct net_device *dev)
@@ -137,10 +135,8 @@ static int setup_data(struct net_device *dev)
if (do_pd_setup(fep) != 0)
return -EINVAL;
- fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = FCC_RX_EVENT;
- fep->ev_tx = FCC_TX_EVENT;
+ fep->ev_napi = FCC_NAPI_EVENT_MSK;
+ fep->ev = FCC_EVENT;
fep->ev_err = FCC_ERR_EVENT_MSK;
return 0;
@@ -424,52 +420,28 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
+ W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+ S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+ C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -595,12 +567,9 @@ const struct fs_ops fs_fcc_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
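
The NO_IRQ removals in this series rely on irq_of_parse_and_map() returning 0 on failure on every architecture, whereas the legacy NO_IRQ constant varied (0 on powerpc, historically -1 on ARM). The idiomatic probe-time check is simply:

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)	/* 0 means no mapping */
		return -EINVAL;
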
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 35a318ed3a62..777beffa1e1e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
@@ -109,10 +109,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
return 0;
}
-#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
-#define FEC_RX_EVENT (FEC_ENET_RXF)
-#define FEC_TX_EVENT (FEC_ENET_TXF)
+#define FEC_NAPI_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
+#define FEC_EVENT (FEC_ENET_RXF | FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
FEC_ENET_BABT | FEC_ENET_EBERR)
@@ -126,10 +124,8 @@ static int setup_data(struct net_device *dev)
fep->fec.hthi = 0;
fep->fec.htlo = 0;
- fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = FEC_RX_EVENT;
- fep->ev_tx = FEC_TX_EVENT;
+ fep->ev_napi = FEC_NAPI_EVENT_MSK;
+ fep->ev = FEC_EVENT;
fep->ev_err = FEC_ERR_EVENT_MSK;
return 0;
@@ -396,52 +392,28 @@ static void stop(struct net_device *dev)
}
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
+ FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+ FS(fecp, imask, FEC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+ FC(fecp, imask, FEC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -513,12 +485,9 @@ const struct fs_ops fs_fec_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index e8b9c33d35b4..15abd37d70e3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
return -EINVAL;
fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
@@ -115,10 +115,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
return 0;
}
-#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
-#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
-#define SCC_RX_EVENT (SCCE_ENET_RXF)
-#define SCC_TX_EVENT (SCCE_ENET_TXB)
+#define SCC_NAPI_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
+#define SCC_EVENT (SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
static int setup_data(struct net_device *dev)
@@ -130,10 +128,8 @@ static int setup_data(struct net_device *dev)
fep->scc.hthi = 0;
fep->scc.htlo = 0;
- fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = SCC_RX_EVENT;
- fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
+ fep->ev_napi = SCC_NAPI_EVENT_MSK;
+ fep->ev = SCC_EVENT | SCCE_ENET_TXE;
fep->ev_err = SCC_ERR_EVENT_MSK;
return 0;
@@ -379,52 +375,28 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
+ W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+ S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+ C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -497,12 +469,9 @@ const struct fs_ops fs_scc_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f3c63dce1e30..446c7b374ff5 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -195,7 +195,7 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
return 0;
}
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
/*
* Return the TBIPA address, starting from the address
* of the mapped GFAR MDIO registers (struct gfar)
@@ -228,7 +228,7 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
}
#endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
/*
* Return the TBIPAR address for a QE MDIO node, starting from the address
* of the mapped MII registers (struct fsl_pq_mii)
@@ -306,7 +306,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
#endif
static const struct of_device_id fsl_pq_mdio_match[] = {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
{
.compatible = "fsl,gianfar-tbi",
.data = &(struct fsl_pq_mdio_data) {
@@ -344,7 +344,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
},
},
#endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
{
.compatible = "fsl,ucc-mdio",
.data = &(struct fsl_pq_mdio_data) {
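
IS_ENABLED(), from <linux/kconfig.h>, evaluates to 1 when the option is built-in or modular, so each two-clause preprocessor test collapses to one:

	#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
	/* old spelling */
	#endif
	#if IS_ENABLED(CONFIG_GIANFAR)	/* same condition: =y or =m */
	#endif
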
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5bf1ade28315..186ef8f16c80 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3756,7 +3756,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return -EINVAL;
}
if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid rx-clock propperty\n");
+ pr_err("invalid rx-clock property\n");
return -EINVAL;
}
ug_info->uf_info.rx_clock = *prop;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 7b8fe866f603..e03b30c60dcf 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -271,11 +271,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
goto err_ioremap;
}
- if (of_get_property(pdev->dev.of_node,
- "little-endian", NULL))
- priv->is_little_endian = true;
- else
- priv->is_little_endian = false;
+ priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
+ "little-endian");
ret = of_mdiobus_register(bus, np);
if (ret) {
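
of_property_read_bool() reports whether the named property exists, which is exactly the semantics of an empty device-tree boolean, so the open-coded of_get_property() test reduces to one call:

	/* DT fragment (illustrative):
	 *	mdio {
	 *		little-endian;
	 *	};
	 */
	priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
						       "little-endian");
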
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0c4afe95ef54..39778892b3b3 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -600,7 +600,7 @@ static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
struct hip04_priv *priv;
@@ -755,13 +755,13 @@ static void hip04_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
-static struct ethtool_ops hip04_ethtool_ops = {
+static const struct ethtool_ops hip04_ethtool_ops = {
.get_coalesce = hip04_get_coalesce,
.set_coalesce = hip04_set_coalesce,
.get_drvinfo = hip04_get_drvinfo,
};
-static struct net_device_ops hip04_netdev_ops = {
+static const struct net_device_ops hip04_netdev_ops = {
.ndo_open = hip04_mac_open,
.ndo_stop = hip04_mac_stop,
.ndo_get_stats = hip04_get_stats,
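
Marking the ops tables static const lets the compiler place them in .rodata, so a stray runtime write faults instead of silently corrupting function pointers; nothing writes these tables after registration:

	static const struct ethtool_ops hip04_ethtool_ops = {
		.get_coalesce	= hip04_get_coalesce,
		/* ... */
	};
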
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index b5d7ad0252a0..ced185962ef8 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -699,7 +699,7 @@ static int hisi_femac_net_ioctl(struct net_device *dev,
return phy_mii_ioctl(dev->phydev, ifreq, cmd);
}
-static struct ethtool_ops hisi_femac_ethtools_ops = {
+static const struct ethtool_ops hisi_femac_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -940,8 +940,8 @@ static int hisi_femac_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-int hisi_femac_drv_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
@@ -957,7 +957,7 @@ int hisi_femac_drv_suspend(struct platform_device *pdev,
return 0;
}
-int hisi_femac_drv_resume(struct platform_device *pdev)
+static int hisi_femac_drv_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 275618bb4646..e69a6bed31a9 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -750,7 +750,7 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
.ndo_set_mac_address = hix5hd2_net_set_mac_address,
};
-static struct ethtool_ops hix5hd2_ethtools_ops = {
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 5c8afe1a5ccb..a834774fdb02 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -684,8 +684,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
if (!phy || IS_ERR(phy))
return -EIO;
- if (mdio->irq)
- phy->irq = mdio->irq[addr];
+ phy->irq = mdio->irq[addr];
/* All data is now stored in the phy struct;
* register it
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index afb5daa3721d..eb448dff7564 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
@@ -115,10 +116,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
res);
- if (IS_ERR(dsaf_dev->sc_base)) {
- dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ if (IS_ERR(dsaf_dev->sc_base))
return PTR_ERR(dsaf_dev->sc_base);
- }
res = platform_get_resource(pdev, IORESOURCE_MEM,
res_idx++);
@@ -129,10 +128,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
res);
- if (IS_ERR(dsaf_dev->sds_base)) {
- dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ if (IS_ERR(dsaf_dev->sds_base))
return PTR_ERR(dsaf_dev->sds_base);
- }
} else {
dsaf_dev->sub_ctrl = syscon;
}
@@ -147,10 +144,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dsaf_dev->ppe_base)) {
- dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+ if (IS_ERR(dsaf_dev->ppe_base))
return PTR_ERR(dsaf_dev->ppe_base);
- }
dsaf_dev->ppe_paddr = res->start;
if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
@@ -166,10 +161,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dsaf_dev->io_base)) {
- dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+ if (IS_ERR(dsaf_dev->io_base))
return PTR_ERR(dsaf_dev->io_base);
- }
}
ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
@@ -2781,6 +2774,89 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
+/**
+ * hns_dsaf_roce_reset - reset dsaf and roce
+ * @dsaf_fwnode: Pointer to framework node for the dsaf
+ * @enable: false - request reset, true - drop reset
+ * return 0 - success, negative - fail
+ */
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
+{
+ struct dsaf_device *dsaf_dev;
+ struct platform_device *pdev;
+ u32 mp;
+ u32 sl;
+ u32 credit;
+ int i;
+ const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ };
+ const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ };
+
+ if (!is_of_node(dsaf_fwnode)) {
+ pr_err("hisi_dsaf: Only support DT node!\n");
+ return -EINVAL;
+ }
+ pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+ dsaf_dev = dev_get_drvdata(&pdev->dev);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
+ dsaf_dev->ae_dev.name);
+ return -ENODEV;
+ }
+
+ if (!enable) {
+ /* Reset rocee-channels in dsaf and rocee */
+ hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);
+ hns_dsaf_roce_srst(dsaf_dev, false);
+ } else {
+ /* Configure dsaf tx roce correspond to port map and sl map */
+ mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(mp, 7 << i * 3, i * 3,
+ port_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
+
+ sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(sl, 3 << i * 2, i * 2,
+ sl_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
+
+ /* De-reset rocee-channels in dsaf and rocee */
+ hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);
+ msleep(SRST_TIME_INTERVAL);
+ hns_dsaf_roce_srst(dsaf_dev, true);
+
+		/* Enable dsaf channel rocee credit */
+ credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(hns_dsaf_roce_reset);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
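
hns_dsaf_roce_reset() is exported for use by the RoCE driver introduced elsewhere in this merge. A hedged usage sketch, assuming the caller already holds a fwnode handle for the DSAF device:

	hns_dsaf_roce_reset(dsaf_fwnode, false);	/* request reset */
	/* ... quiesce and re-initialise RoCE engine state ... */
	hns_dsaf_roce_reset(dsaf_fwnode, true);		/* drop reset; also
							 * rewrites the port/SL
							 * maps and re-enables
							 * credits */
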
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 1daf018d9071..f3681d566ae6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -43,6 +43,32 @@ struct hns_mac_cb;
#define DSAF_PRIO_NR 8
#define DSAF_REG_PER_ZONE 3
+#define DSAF_ROCE_CREDIT_CHN 8
+#define DSAF_ROCE_CHAN_MODE 3
+
+enum dsaf_roce_port_mode {
+ DSAF_ROCE_6PORT_MODE,
+ DSAF_ROCE_4PORT_MODE,
+ DSAF_ROCE_2PORT_MODE,
+ DSAF_ROCE_CHAN_MODE_NUM,
+};
+
+enum dsaf_roce_port_num {
+ DSAF_ROCE_PORT_0,
+ DSAF_ROCE_PORT_1,
+ DSAF_ROCE_PORT_2,
+ DSAF_ROCE_PORT_3,
+ DSAF_ROCE_PORT_4,
+ DSAF_ROCE_PORT_5,
+};
+
+enum dsaf_roce_qos_sl {
+ DSAF_ROCE_SL_0,
+ DSAF_ROCE_SL_1,
+ DSAF_ROCE_SL_2,
+ DSAF_ROCE_SL_3,
+};
+
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -419,6 +445,10 @@ int hns_dsaf_get_mac_entry_by_index(
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable);
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable);
+
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 611b67b6f450..36b9f791cf2f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -231,6 +231,42 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
+/**
+ * hns_dsaf_srst_chns - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ * @enable: false - request reset, true - drop reset
+ */
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable)
+{
+ u32 reg_addr;
+
+ if (!enable)
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, msk);
+}
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable)
+{
+ if (!enable) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
+ } else {
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
+ msleep(20);
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
+ }
+}
+
static void
hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
u32 port, bool dereset)
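
The msk argument follows the bit layout in the kerneldoc above; DSAF_CHNS_MASK (0x3f000, added to the register header below) sets bits 12-17, i.e. exactly the six RoCE crossbar channels, which is how hns_dsaf_roce_reset() uses it:

	hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);	/* request reset */
	hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);	/* drop reset */
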
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 235f74444b1d..13c16ab7be48 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -77,6 +77,12 @@
#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C
#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
+#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50
+#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC
+#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C
+#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54
+#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328
#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300
@@ -133,6 +139,8 @@
#define DSAF_ROCEE_INT_STS_0_REG 0x200
#define DSAFV2_SERDES_LBK_0_REG 0x220
#define DSAF_PAUSE_CFG_REG 0x240
+#define DSAF_ROCE_PORT_MAP_REG 0x2A0
+#define DSAF_ROCE_SL_MAP_REG 0x2A4
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -178,6 +186,7 @@
#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
+#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380
#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
@@ -796,6 +805,9 @@
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+#define DSAF_CHNS_MASK 0x3f000
+#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2
+#define SRST_TIME_INTERVAL 20
#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d7e1f8c7ae92..059aaeda46b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -994,10 +994,10 @@ static void hns_nic_adjust_link(struct net_device *ndev)
struct hnae_handle *h = priv->ae_handle;
int state = 1;
- if (priv->phy) {
+ if (ndev->phydev) {
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
- state = priv->phy->link;
+ state = ndev->phydev->link;
}
state = state && h->dev->ops->get_status(h);
@@ -1022,7 +1022,6 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
- struct hns_nic_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1046,8 +1045,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
- priv->phy = phy_dev;
-
return 0;
}
@@ -1224,8 +1221,8 @@ static int hns_nic_net_up(struct net_device *ndev)
if (ret)
goto out_start_err;
- if (priv->phy)
- phy_start(priv->phy);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
clear_bit(NIC_STATE_DOWN, &priv->state);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
@@ -1259,8 +1256,8 @@ static void hns_nic_net_down(struct net_device *ndev)
netif_tx_disable(ndev);
priv->link = 0;
- if (priv->phy)
- phy_stop(priv->phy);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
ops = priv->ae_handle->dev->ops;
@@ -1359,8 +1356,7 @@ static void hns_nic_net_timeout(struct net_device *ndev)
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
if (!netif_running(netdev))
return -EINVAL;
@@ -2017,9 +2013,8 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
hns_nic_uninit_ring_data(priv);
priv->ring_data = NULL;
- if (priv->phy)
- phy_disconnect(priv->phy);
- priv->phy = NULL;
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
if (!IS_ERR_OR_NULL(priv->ae_handle))
hnae_put_handle(priv->ae_handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 44bb3015eed3..5b412de350aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -59,7 +59,6 @@ struct hns_nic_priv {
u32 port_id;
int phy_mode;
int phy_led_val;
- struct phy_device *phy;
struct net_device *netdev;
struct device *dev;
struct hnae_handle *ae_handle;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index ab33487a5321..47e59bbfd061 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -48,9 +48,9 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
h = priv->ae_handle;
- if (priv->phy) {
- if (!genphy_read_status(priv->phy))
- link_stat = priv->phy->link;
+ if (net_dev->phydev) {
+ if (!genphy_read_status(net_dev->phydev))
+ link_stat = net_dev->phydev->link;
else
link_stat = 0;
}
@@ -64,15 +64,14 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
}
static void hns_get_mdix_mode(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
int mdix_ctrl, mdix, retval, is_resolved;
- struct hns_nic_priv *priv = netdev_priv(net_dev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = net_dev->phydev;
if (!phy_dev || !phy_dev->mdio.bus) {
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
return;
}
@@ -89,35 +88,35 @@ static void hns_get_mdix_mode(struct net_device *net_dev,
switch (mdix_ctrl) {
case 0x0:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
break;
case 0x1:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
break;
case 0x3:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
break;
default:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
break;
}
if (!is_resolved)
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
else if (mdix)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
/**
- *hns_nic_get_settings - implement ethtool get settings
+ *hns_nic_get_link_ksettings - implement ethtool get link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
*/
-static int hns_nic_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -125,6 +124,7 @@ static int hns_nic_get_settings(struct net_device *net_dev,
int ret;
u8 duplex;
u16 speed;
+ u32 supported, advertising;
if (!priv || !priv->ae_handle)
return -ESRCH;
@@ -139,38 +139,43 @@ static int hns_nic_get_settings(struct net_device *net_dev,
return -EINVAL;
}
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
/* When there is no phy, autoneg is off. */
- cmd->autoneg = false;
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.autoneg = false;
+	cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
- if (priv->phy)
- (void)phy_ethtool_gset(priv->phy, cmd);
+ if (net_dev->phydev)
+ (void)phy_ethtool_ksettings_get(net_dev->phydev, cmd);
link_stat = hns_nic_get_link(net_dev);
if (!link_stat) {
- ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = (u32)SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- if (cmd->autoneg)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg)
+ advertising |= ADVERTISED_Autoneg;
- cmd->supported |= h->if_support;
+ supported |= h->if_support;
if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_1000baseT_Full;
} else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_10000baseKR_Full;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_10000baseKR_Full;
}
switch (h->media_type) {
case HNAE_MEDIA_TYPE_FIBER:
- cmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case HNAE_MEDIA_TYPE_COPPER:
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
case HNAE_MEDIA_TYPE_UNKNOWN:
default:
@@ -178,23 +183,27 @@ static int hns_nic_get_settings(struct net_device *net_dev,
}
if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
- cmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22;
hns_get_mdix_mode(net_dev, cmd);
return 0;
}
/**
- *hns_nic_set_settings - implement ethtool set settings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
*/
-static int hns_nic_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -208,24 +217,25 @@ static int hns_nic_set_settings(struct net_device *net_dev,
return -ENODEV;
h = priv->ae_handle;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
- cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ speed != SPEED_10000 ||
+ cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
+ if (!net_dev->phydev && cmd->base.autoneg == AUTONEG_ENABLE)
return -EINVAL;
- if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
- if (priv->phy)
- return phy_ethtool_sset(priv->phy, cmd);
+ if (net_dev->phydev)
+ return phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if ((speed != SPEED_10 && speed != SPEED_100 &&
- speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL))
+ speed != SPEED_1000) || (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
} else {
netdev_err(net_dev, "Not supported!");
@@ -233,7 +243,7 @@ static int hns_nic_set_settings(struct net_device *net_dev,
}
if (h->dev->ops->adjust_link) {
- h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
+ h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
return 0;
}
@@ -305,7 +315,7 @@ static int __lb_setup(struct net_device *ndev,
{
int ret = 0;
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = ndev->phydev;
struct hnae_handle *h = priv->ae_handle;
switch (loop) {
@@ -910,7 +920,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
ETH_GSTRING_LEN);
buff += ETH_GSTRING_LEN;
- if ((priv->phy) && (!priv->phy->is_c45))
+ if ((netdev->phydev) && (!netdev->phydev->is_c45))
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
ETH_GSTRING_LEN);
@@ -996,7 +1006,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
- if ((!priv->phy) || (priv->phy->is_c45))
+ if ((!netdev->phydev) || (netdev->phydev->is_c45))
cnt--;
return cnt;
@@ -1015,8 +1025,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
@@ -1039,7 +1048,7 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
int ret;
if (phy_dev)
@@ -1159,8 +1168,7 @@ static int hns_get_regs_len(struct net_device *net_dev)
static int hns_nic_nway_reset(struct net_device *netdev)
{
int ret = 0;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = netdev->phydev;
if (netif_running(netdev)) {
if (phy)
@@ -1264,11 +1272,9 @@ static int hns_get_rxnfc(struct net_device *netdev,
return 0;
}
-static struct ethtool_ops hns_ethtool_ops = {
+static const struct ethtool_ops hns_ethtool_ops = {
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
- .get_settings = hns_nic_get_settings,
- .set_settings = hns_nic_set_settings,
.get_ringparam = hns_get_ringparam,
.get_pauseparam = hns_get_pauseparam,
.set_pauseparam = hns_set_pauseparam,
@@ -1288,6 +1294,8 @@ static struct ethtool_ops hns_ethtool_ops = {
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
.get_rxnfc = hns_get_rxnfc,
+ .get_link_ksettings = hns_nic_get_link_ksettings,
+ .set_link_ksettings = hns_nic_set_link_ksettings,
};
void hns_ethtool_set_ops(struct net_device *ndev)
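
The conversion helpers used above are the stock ethtool-core routines for drivers that still compute in the legacy u32 masks; the round-trip pattern is:

	u32 supported;

	/* bitmap -> legacy mask, adjust, then write the bitmap back */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	supported |= SUPPORTED_Pause;
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
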
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index befb4ac3e2b0..ce235b776793 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -89,10 +89,10 @@ static char version[] __initdata =
#define DEB(x,y) if (i596_debug & (x)) y
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME16x_NET)
#define ENABLE_MVME16x_NET
#endif
-#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
+#if IS_ENABLED(CONFIG_BVME6000_NET)
#define ENABLE_BVME6000_NET
#endif
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7af09cbc53f0..8f139197f1aa 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2780,7 +2780,7 @@ static int emac_probe(struct platform_device *ofdev)
/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
dev->emac_irq = irq_of_parse_and_map(np, 0);
dev->wol_irq = irq_of_parse_and_map(np, 1);
- if (dev->emac_irq == NO_IRQ) {
+ if (!dev->emac_irq) {
printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
goto err_free;
}
@@ -2943,9 +2943,9 @@ static int emac_probe(struct platform_device *ofdev)
err_reg_unmap:
iounmap(dev->emacp);
err_irq_unmap:
- if (dev->wol_irq != NO_IRQ)
+ if (dev->wol_irq)
irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
+ if (dev->emac_irq)
irq_dispose_mapping(dev->emac_irq);
err_free:
free_netdev(ndev);
@@ -2987,9 +2987,9 @@ static int emac_remove(struct platform_device *ofdev)
emac_dbg_unregister(dev);
iounmap(dev->emacp);
- if (dev->wol_irq != NO_IRQ)
+ if (dev->wol_irq)
irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
+ if (dev->emac_irq)
irq_dispose_mapping(dev->emac_irq);
free_netdev(dev->ndev);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fdb5cdb3cd15..aaf6fec566b5 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -597,9 +597,8 @@ static int mal_probe(struct platform_device *ofdev)
mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
}
- if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
- mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
- mal->rxde_irq == NO_IRQ) {
+ if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
+ !mal->txde_irq || !mal->rxde_irq) {
printk(KERN_ERR
"mal%d: failed to map interrupts !\n", index);
err = -ENODEV;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 88f3c85fb04a..bfe17d9c022d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -203,7 +203,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- send_request_unmap(adapter, ltb->map_id);
+ if (!adapter->failover)
+ send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
@@ -522,7 +523,8 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_tx_stop_all_queues(netdev);
+ if (!adapter->failover)
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1422,7 +1424,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
scrq = adapter->tx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
+ if (!scrq->irq) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_tx_irq_failed;
@@ -1442,7 +1444,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
scrq = adapter->rx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
+ if (!scrq->irq) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
@@ -2777,12 +2779,6 @@ static void handle_control_ras_rsp(union ibmvnic_crq *crq,
}
}
-static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
loff_t *ppos)
{
@@ -2834,7 +2830,7 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
static const struct file_operations trace_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = trace_read,
};
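simple_open() is the generic libfs helper that does exactly what the deleted ibmvnic_fw_comp_open() did; for reference, its definition in fs/libfs.c is essentially:

	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}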
@@ -2884,7 +2880,7 @@ static ssize_t paused_write(struct file *file, const char __user *user_buf,
static const struct file_operations paused_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = paused_read,
.write = paused_write,
};
@@ -2932,7 +2928,7 @@ static ssize_t tracing_write(struct file *file, const char __user *user_buf,
static const struct file_operations tracing_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = tracing_read,
.write = tracing_write,
};
@@ -2985,7 +2981,7 @@ static ssize_t error_level_write(struct file *file, const char __user *user_buf,
static const struct file_operations error_level_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = error_level_read,
.write = error_level_write,
};
@@ -3036,7 +3032,7 @@ static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
static const struct file_operations trace_level_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = trace_level_read,
.write = trace_level_write,
};
@@ -3089,7 +3085,7 @@ static ssize_t trace_buff_size_write(struct file *file,
static const struct file_operations trace_size_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = trace_buff_size_read,
.write = trace_buff_size_write,
};
@@ -3280,6 +3276,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
rc = ibmvnic_send_crq_init(adapter);
if (rc)
dev_err(dev, "Error sending init rc=%ld\n", rc);
+ } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
+ dev_info(dev, "Backing device failover detected\n");
+ netif_carrier_off(netdev);
+ adapter->failover = true;
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
@@ -3615,8 +3615,18 @@ static void handle_crq_init_rsp(struct work_struct *work)
struct device *dev = &adapter->vdev->dev;
struct net_device *netdev = adapter->netdev;
unsigned long timeout = msecs_to_jiffies(30000);
+ bool restart = false;
int rc;
+ if (adapter->failover) {
+ release_sub_crqs(adapter);
+ if (netif_running(netdev)) {
+ netif_tx_disable(netdev);
+ ibmvnic_close(netdev);
+ restart = true;
+ }
+ }
+
send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -3645,6 +3655,17 @@ static void handle_crq_init_rsp(struct work_struct *work)
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ if (adapter->failover) {
+ adapter->failover = false;
+ if (restart) {
+ rc = ibmvnic_open(netdev);
+ if (rc)
+ goto restart_failed;
+ }
+ netif_carrier_on(netdev);
+ return;
+ }
+
rc = register_netdev(netdev);
if (rc) {
dev_err(dev,
@@ -3655,6 +3676,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
return;
+restart_failed:
+ dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
release_sub_crqs(adapter);
task_failed:
@@ -3692,6 +3715,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev_set_drvdata(&dev->dev, netdev);
adapter->vdev = dev;
adapter->netdev = netdev;
+ adapter->failover = false;
ether_addr_copy(adapter->mac_addr, mac_addr_p);
ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -3721,6 +3745,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(&dev->dev, "Couldn't map stats buffer\n");
+ rc = -ENOMEM;
goto free_crq;
}
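Taken together, the ibmvnic hunks above implement a small failover state machine: the CRQ handler flags the adapter and drops the carrier, and the deferred init work later tears down the sub-CRQs, closes a running device, renegotiates with the new backing device, and reopens instead of re-registering the netdev. A condensed, illustrative rendering of that control flow (not the literal driver code):

	/* 1) CRQ interrupt context: note the event, defer the heavy work */
	if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
		netif_carrier_off(netdev);
		adapter->failover = true;
	}

	/* 2) Deferred work (handle_crq_init_rsp): quiesce, renegotiate, resume */
	if (adapter->failover) {
		release_sub_crqs(adapter);
		restart = netif_running(netdev);
		if (restart) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
		}
	}
	/* ... version exchange and capability negotiation with the new device ... */
	if (adapter->failover) {
		adapter->failover = false;
		if (restart && ibmvnic_open(netdev))
			goto restart_failed;	/* log and release resources */
		netif_carrier_on(netdev);
	}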
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index e82898fd518e..bfc84c7d0e11 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -830,6 +830,7 @@ enum ibmvfc_crq_format {
IBMVNIC_CRQ_INIT = 0x01,
IBMVNIC_CRQ_INIT_COMPLETE = 0x02,
IBMVNIC_PARTITION_MIGRATED = 0x06,
+ IBMVNIC_DEVICE_FAILOVER = 0x08,
};
struct ibmvnic_crq_queue {
@@ -1047,4 +1048,5 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ bool failover;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 2e1b17ad52a3..ad03763e009a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -334,7 +334,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_err("ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
e_info("registered PHC clock\n");
}
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index c4cf08dcf5af..4d19e46f7c55 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -240,9 +240,7 @@ struct fm10k_iov_data {
struct fm10k_vf_info vf_info[0];
};
-#define fm10k_vxlan_port_for_each(vp, intfc) \
- list_for_each_entry(vp, &(intfc)->vxlan_port, list)
-struct fm10k_vxlan_port {
+struct fm10k_udp_port {
struct list_head list;
sa_family_t sa_family;
__be16 port;
@@ -335,8 +333,9 @@ struct fm10k_intfc {
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
- /* VXLAN port tracking information */
+ /* UDP encapsulation port tracking information */
struct list_head vxlan_port;
+ struct list_head geneve_port;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
@@ -458,7 +457,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
-u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
@@ -496,7 +495,6 @@ int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
-u32 fm10k_get_reta_size(struct net_device *netdev);
void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
/* IOV */
@@ -509,7 +507,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
- int vf_idx, u16 vid, u8 qos);
+ int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index d6baaea8bc7c..dd95ac4f4c64 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -207,6 +207,9 @@ s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
/* clear tx_ready to prevent any false hits for reset */
hw->mac.tx_ready = false;
+ if (FM10K_REMOVED(hw->hw_addr))
+ return 0;
+
/* clear the enable bit for all rings */
for (i = 0; i < q_cnt; i++) {
reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 50f71e997448..d51f9c7a47ff 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -34,7 +34,7 @@ u32 fm10k_read_reg(struct fm10k_hw *hw, int reg);
/* write operations, indexed using DWORDS */
#define fm10k_write_reg(hw, reg, val) \
do { \
- u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u32 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!FM10K_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
@@ -42,7 +42,7 @@ do { \
/* Switch register write operations, index using DWORDS */
#define fm10k_write_sw_reg(hw, reg, val) \
do { \
- u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \
+ u32 __iomem *sw_addr = READ_ONCE((hw)->sw_addr); \
if (!FM10K_REMOVED(sw_addr)) \
writel((val), &sw_addr[(reg)]); \
} while (0)
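The ACCESS_ONCE() to READ_ONCE() conversions throughout fm10k follow the tree-wide phase-out: ACCESS_ONCE() miscompiles on non-scalar types with some GCC releases, while READ_ONCE()/WRITE_ONCE() from <linux/compiler.h> are size-aware. The pattern being protected, sketched:

	#include <linux/compiler.h>

	/* Force a single, untorn load of a pointer that a detach/reset
	 * path may swap to a sentinel value concurrently.
	 */
	u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);

	if (!FM10K_REMOVED(hw_addr))
		writel(val, &hw_addr[reg]);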
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index c04cbe9c9f7c..5241e0873397 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -966,7 +966,7 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
return 0;
}
-u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
@@ -1182,6 +1182,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 47f0743ec03b..5f4dac0d36ef 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -51,7 +51,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
int i;
/* if there is no iov_data then there is no mailbox to process */
- if (!ACCESS_ONCE(interface->iov_data))
+ if (!READ_ONCE(interface->iov_data))
return 0;
rcu_read_lock();
@@ -99,7 +99,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
int i;
/* if there is no iov_data then there is no mailbox to process */
- if (!ACCESS_ONCE(interface->iov_data))
+ if (!READ_ONCE(interface->iov_data))
return 0;
rcu_read_lock();
@@ -445,7 +445,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
}
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -460,6 +460,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
if (qos || (vid > (VLAN_VID_MASK - 1)))
return -EINVAL;
+ /* VF VLAN Protocol part to default is unsupported */
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
vf_info = &iov_data->vf_info[vf_idx];
/* exit if there is nothing to do */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e9767b6366a8..5de937852436 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -56,7 +56,8 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_driver_name);
fm10k_dbg_init();
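alloc_workqueue() takes a printf-style format plus varargs, so the hunk above names the queue from fm10k_driver_name instead of duplicating the literal. A hedged sketch of the equivalent call:

	/* alloc_workqueue(fmt, flags, max_active, args...): the name is a
	 * format string, so the driver name string can be reused directly.
	 */
	struct workqueue_struct *wq;

	wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name);
	if (!wq)
		return -ENOMEM;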
@@ -651,11 +652,11 @@ static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
struct fm10k_intfc *interface = netdev_priv(skb->dev);
- struct fm10k_vxlan_port *vxlan_port;
+ struct fm10k_udp_port *vxlan_port;
/* we can only offload a vxlan if we recognize it as such */
vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port, list);
+ struct fm10k_udp_port, list);
if (!vxlan_port)
return NULL;
@@ -1128,13 +1129,24 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
return ring->stats.packets;
}
-u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
+/**
+ * fm10k_get_tx_pending - number of Tx descriptors not yet processed
+ * @ring: the ring structure
+ * @in_sw: use the software (ring) pointers instead of the hardware registers
+ */
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
{
struct fm10k_intfc *interface = ring->q_vector->interface;
struct fm10k_hw *hw = &interface->hw;
+ u32 head, tail;
- u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
- u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
+ if (likely(in_sw)) {
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
+ } else {
+ head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
+ tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
+ }
return ((head <= tail) ? tail : tail + ring->count) - head;
}
@@ -1143,7 +1155,7 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
u32 tx_done = fm10k_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
- u32 tx_pending = fm10k_get_tx_pending(tx_ring);
+ u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
clear_check_for_tx_hang(tx_ring);
@@ -1397,7 +1409,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
* that the calculation will never get below a 1. The bit shift
* accounts for changes in the ITR due to PCIe link speed.
*/
- itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
+ itr_round = READ_ONCE(ring_container->itr_scale) + 8;
avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
@@ -1473,7 +1485,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
/* re-enable the q_vector */
fm10k_qv_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
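The fm10k_poll change enforces the NAPI contract: a poll routine that has completed its work and re-armed interrupts must return strictly less than its budget, since returning the full budget keeps the core in polled mode. A generic sketch (foo_* helpers hypothetical):

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		bool clean_complete;
		int work_done;

		clean_complete = foo_clean_rings(napi, budget, &work_done);

		/* more work pending: ask the core to poll us again */
		if (!clean_complete)
			return budget;

		napi_complete(napi);
		foo_reenable_irqs(napi);

		/* done: the return value must be strictly less than budget,
		 * even if exactly budget packets were processed
		 */
		return min(work_done, budget - 1);
	}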
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 20a5bbe3f536..05629381be6b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -384,129 +384,171 @@ static void fm10k_request_glort_range(struct fm10k_intfc *interface)
}
/**
- * fm10k_del_vxlan_port_all
+ * fm10k_free_udp_port_info
* @interface: board private structure
*
- * This function frees the entire vxlan_port list
+ * This function frees both geneve_port and vxlan_port structures
**/
-static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface)
+static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
{
- struct fm10k_vxlan_port *vxlan_port;
-
- /* flush all entries from list */
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port, list);
- while (vxlan_port) {
- list_del(&vxlan_port->list);
- kfree(vxlan_port);
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port,
- list);
+ struct fm10k_udp_port *port;
+
+ /* flush all entries from vxlan list */
+ port = list_first_entry_or_null(&interface->vxlan_port,
+ struct fm10k_udp_port, list);
+ while (port) {
+ list_del(&port->list);
+ kfree(port);
+ port = list_first_entry_or_null(&interface->vxlan_port,
+ struct fm10k_udp_port,
+ list);
+ }
+
+ /* flush all entries from geneve list */
+ port = list_first_entry_or_null(&interface->geneve_port,
+ struct fm10k_udp_port, list);
+ while (port) {
+ list_del(&port->list);
+ kfree(port);
+		port = list_first_entry_or_null(&interface->geneve_port,
+ struct fm10k_udp_port,
+ list);
}
}
/**
- * fm10k_restore_vxlan_port
+ * fm10k_restore_udp_port_info
* @interface: board private structure
*
- * This function restores the value in the tunnel_cfg register after reset
+ * This function restores the value in the tunnel_cfg register(s) after reset
**/
-static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
+static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
- struct fm10k_vxlan_port *vxlan_port;
+ struct fm10k_udp_port *port;
/* only the PF supports configuring tunnels */
if (hw->mac.type != fm10k_mac_pf)
return;
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port, list);
+ port = list_first_entry_or_null(&interface->vxlan_port,
+ struct fm10k_udp_port, list);
/* restore tunnel configuration register */
fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
- (vxlan_port ? ntohs(vxlan_port->port) : 0) |
+ (port ? ntohs(port->port) : 0) |
(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));
+
+ port = list_first_entry_or_null(&interface->geneve_port,
+ struct fm10k_udp_port, list);
+
+ /* restore Geneve tunnel configuration register */
+ fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
+ (port ? ntohs(port->port) : 0));
+}
+
+static struct fm10k_udp_port *
+fm10k_remove_tunnel_port(struct list_head *ports,
+ struct udp_tunnel_info *ti)
+{
+ struct fm10k_udp_port *port;
+
+ list_for_each_entry(port, ports, list) {
+ if ((port->port == ti->port) &&
+ (port->sa_family == ti->sa_family)) {
+ list_del(&port->list);
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+static void fm10k_insert_tunnel_port(struct list_head *ports,
+ struct udp_tunnel_info *ti)
+{
+ struct fm10k_udp_port *port;
+
+	/* remove any existing entry for this port so that the newest
+	 * entry is always at the tail of the list.
+	 */
+ port = fm10k_remove_tunnel_port(ports, ti);
+ if (!port) {
+ port = kmalloc(sizeof(*port), GFP_ATOMIC);
+ if (!port)
+ return;
+ port->port = ti->port;
+ port->sa_family = ti->sa_family;
+ }
+
+ list_add_tail(&port->list, ports);
}
/**
- * fm10k_add_vxlan_port
+ * fm10k_udp_tunnel_add
* @netdev: network interface device structure
* @ti: Tunnel endpoint information
*
- * This function is called when a new VXLAN interface has added a new port
- * number to the range that is currently in use for VXLAN. The new port
- * number is always added to the tail so that the port number list should
- * match the order in which the ports were allocated. The head of the list
- * is always used as the VXLAN port number for offloads.
+ * This function is called when a new UDP tunnel port has been added.
+ * Due to hardware restrictions, only one port per type can be offloaded at
+ * once.
**/
-static void fm10k_add_vxlan_port(struct net_device *dev,
+static void fm10k_udp_tunnel_add(struct net_device *dev,
struct udp_tunnel_info *ti)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_vxlan_port *vxlan_port;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
/* only the PF supports configuring tunnels */
if (interface->hw.mac.type != fm10k_mac_pf)
return;
- /* existing ports are pulled out so our new entry is always last */
- fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == ti->port) &&
- (vxlan_port->sa_family == ti->sa_family)) {
- list_del(&vxlan_port->list);
- goto insert_tail;
- }
- }
-
- /* allocate memory to track ports */
- vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
- if (!vxlan_port)
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ fm10k_insert_tunnel_port(&interface->vxlan_port, ti);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ fm10k_insert_tunnel_port(&interface->geneve_port, ti);
+ break;
+ default:
return;
- vxlan_port->port = ti->port;
- vxlan_port->sa_family = ti->sa_family;
-
-insert_tail:
- /* add new port value to list */
- list_add_tail(&vxlan_port->list, &interface->vxlan_port);
+ }
- fm10k_restore_vxlan_port(interface);
+ fm10k_restore_udp_port_info(interface);
}
/**
- * fm10k_del_vxlan_port
+ * fm10k_udp_tunnel_del
* @netdev: network interface device structure
* @ti: Tunnel endpoint information
*
- * This function is called when a new VXLAN interface has freed a port
- * number from the range that is currently in use for VXLAN. The freed
- * port is removed from the list and the new head is used to determine
- * the port number for offloads.
+ * This function is called when a UDP tunnel port is deleted. The port
+ * is removed from the list, and the offloaded port is then reprogrammed
+ * based on the new head of the list.
**/
-static void fm10k_del_vxlan_port(struct net_device *dev,
+static void fm10k_udp_tunnel_del(struct net_device *dev,
struct udp_tunnel_info *ti)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_vxlan_port *vxlan_port;
+ struct fm10k_udp_port *port = NULL;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
if (interface->hw.mac.type != fm10k_mac_pf)
return;
- /* find the port in the list and free it */
- fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == ti->port) &&
- (vxlan_port->sa_family == ti->sa_family)) {
- list_del(&vxlan_port->list);
- kfree(vxlan_port);
- break;
- }
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ port = fm10k_remove_tunnel_port(&interface->geneve_port, ti);
+ break;
+ default:
+ return;
}
- fm10k_restore_vxlan_port(interface);
+ /* if we did remove a port we need to free its memory */
+ kfree(port);
+
+ fm10k_restore_udp_port_info(interface);
}
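These handlers plug into the common UDP tunnel offload path: the stack invokes ndo_udp_tunnel_add/del with a struct udp_tunnel_info carrying type, address family, and port, and udp_tunnel_get_rx_info() (called from fm10k_open above) replays every currently known port. Minimal wiring sketch for a hypothetical foo driver:

	static void foo_udp_tunnel_add(struct net_device *dev,
				       struct udp_tunnel_info *ti)
	{
		switch (ti->type) {
		case UDP_TUNNEL_TYPE_VXLAN:
		case UDP_TUNNEL_TYPE_GENEVE:
			/* program ntohs(ti->port) into the hardware here */
			break;
		default:
			return;
		}
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_udp_tunnel_add = foo_udp_tunnel_add,
		/* .ndo_udp_tunnel_del is the symmetric removal hook */
	};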
/**
@@ -555,7 +597,6 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
- /* update VXLAN port configuration */
udp_tunnel_get_rx_info(netdev);
fm10k_up(interface);
@@ -591,7 +632,7 @@ int fm10k_close(struct net_device *netdev)
fm10k_qv_free_irq(interface);
- fm10k_del_vxlan_port_all(interface);
+ fm10k_free_udp_port_info(interface);
fm10k_free_all_tx_resources(interface);
fm10k_free_all_rx_resources(interface);
@@ -1055,7 +1096,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
interface->xcast_mode = xcast_mode;
/* Restore tunnel configuration */
- fm10k_restore_vxlan_port(interface);
+ fm10k_restore_udp_port_info(interface);
}
void fm10k_reset_rx_state(struct fm10k_intfc *interface)
@@ -1098,7 +1139,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
rcu_read_lock();
for (i = 0; i < interface->num_rx_queues; i++) {
- ring = ACCESS_ONCE(interface->rx_ring[i]);
+ ring = READ_ONCE(interface->rx_ring[i]);
if (!ring)
continue;
@@ -1114,7 +1155,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
}
for (i = 0; i < interface->num_tx_queues; i++) {
- ring = ACCESS_ONCE(interface->tx_ring[i]);
+ ring = READ_ONCE(interface->tx_ring[i]);
if (!ring)
continue;
@@ -1299,7 +1340,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel);
+ struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
struct net_device *sdev = priv;
@@ -1375,8 +1416,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
- .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
- .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
+ .ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
+ .ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 774a5654bf42..b1a2f8437d59 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -62,7 +62,7 @@ u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
- u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (FM10K_REMOVED(hw_addr))
@@ -133,7 +133,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
/* check the real address space to see if we've recovered */
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
- if ((~value)) {
+ if (~value) {
interface->hw.hw_addr = interface->uc_addr;
netif_device_attach(netdev);
interface->flags |= FM10K_FLAG_RESET_REQUESTED;
@@ -734,15 +734,15 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
u64 rdba = ring->dma;
struct fm10k_hw *hw = &interface->hw;
u32 size = ring->count * sizeof(union fm10k_rx_desc);
- u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
- u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+ u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
u32 rxint = FM10K_INT_MAP_DISABLE;
u8 rx_pause = interface->rx_pause;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
- fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
+ rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
+ rxqctl &= ~FM10K_RXQCTL_ENABLE;
fm10k_write_flush(hw);
/* possible poll here to verify ring resources have been cleaned */
@@ -797,6 +797,8 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);
/* enable queue */
+ rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
+ rxqctl |= FM10K_RXQCTL_ENABLE;
fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
/* place buffers on ring for receive data */
@@ -1699,7 +1701,7 @@ void fm10k_down(struct fm10k_intfc *interface)
/* start checking at the last ring to have pending Tx */
for (; i < interface->num_tx_queues; i++)
- if (fm10k_get_tx_pending(interface->tx_ring[i]))
+ if (fm10k_get_tx_pending(interface->tx_ring[i], false))
break;
/* if all the queues are drained, we can break now */
@@ -1835,8 +1837,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
interface->tx_itr = FM10K_TX_ITR_DEFAULT;
interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
- /* initialize vxlan_port list */
+ /* initialize udp port lists */
INIT_LIST_HEAD(&interface->vxlan_port);
+ INIT_LIST_HEAD(&interface->geneve_port);
netdev_rss_key_fill(rss_key, sizeof(rss_key));
memcpy(interface->rssrk, rss_key, sizeof(rss_key));
@@ -1950,9 +1953,18 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct fm10k_intfc *interface;
int err;
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
err = pci_enable_device_mem(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev,
+ "PCI enable device failed: %d\n", err);
return err;
+ }
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err)
@@ -2275,7 +2287,7 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t result;
- if (pci_enable_device_mem(pdev)) {
+ if (pci_reenable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
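Switching fm10k_io_slot_reset() to pci_reenable_device() matters because the device was never logically disabled on this path; pci_enable_device_mem() would bump the enable count a second time and leave it unbalanced. The shape of such a handler, sketched with hypothetical names:

	static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
	{
		if (pci_reenable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;

		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}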
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 682299dd0ce4..23fb319fd2a0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -867,10 +867,6 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
qmap_idx = qmap_stride * vf_idx;
- /* MAP Tx queue back to 0 temporarily, and disable it */
- fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
- fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
-
/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
* used here to indicate to the VF that it will not have privilege to
* write VLAN_TABLE. All policy is enforced on the PF but this allows
@@ -886,9 +882,35 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
vf_info->mac, vf_vid);
- /* load onto outgoing mailbox, ignore any errors on enqueue */
- if (vf_info->mbx.ops.enqueue_tx)
- vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ /* Configure Queue control register with new VLAN ID. The TXQCTL
+ * register is RO from the VF, so the PF must do this even in the
+ * case of notifying the VF of a new VID via the mailbox.
+ */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ for (i = 0; i < queues_per_pool; i++)
+ fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+	/* try loading a message onto the outgoing mailbox first */
+ if (vf_info->mbx.ops.enqueue_tx) {
+ err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ if (err != FM10K_MBX_ERR_NO_MBX)
+ return err;
+ err = 0;
+ }
+
+ /* If we aren't connected to a mailbox, this is most likely because
+ * the VF driver is not running. It should thus be safe to re-map
+ * queues and use the registers to pass the MAC address so that the VF
+ * driver gets correct information during its initialization.
+ */
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
+ fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
/* verify ring has disabled before modifying base address registers */
txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
@@ -927,16 +949,6 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
FM10K_TDLEN_ITR_SCALE_SHIFT);
err_out:
- /* configure Queue control register */
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
- txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
- FM10K_TXQCTL_VF | vf_idx;
-
- /* assign VLAN ID */
- for (i = 0; i < queues_per_pool; i++)
- fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
-
/* restore the queue back to VF ownership */
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
return err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index f4e75c498287..6bb16c13d9d6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -154,6 +154,7 @@ struct fm10k_hw;
#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
#define FM10K_TUNNEL_CFG 0x0040
#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
#define FM10K_SWPRI_MAX 16
#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2a882916b4f6..2030d7c1dc94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -65,76 +65,72 @@
#include "i40e_dcb.h"
/* Useful i40e defaults */
-#define I40E_MAX_VEB 16
-
-#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
-#define I40E_DEFAULT_NUM_DESCRIPTORS 512
-#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
-#define I40E_MIN_NUM_DESCRIPTORS 64
-#define I40E_MIN_MSIX 2
-#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
-#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
-#define I40E_DEFAULT_QUEUES_PER_VF 4
-#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
#define i40e_pf_get_max_q_per_tc(pf) \
(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
-#define I40E_FDIR_RING 0
-#define I40E_FDIR_RING_COUNT 32
+#define I40E_FDIR_RING 0
+#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
+#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
+#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
-#define I40E_MAX_AQ_BUF_SIZE 4096
-#define I40E_AQ_LEN 256
-#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
-#define I40E_MAX_USER_PRIORITY 8
-#define I40E_DEFAULT_MSG_ENABLE 4
-#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
-#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 256
+#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_MSG_ENABLE 4
+#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
-#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
-#define I40E_OEM_VER_BUILD_MASK 0xffff
-#define I40E_OEM_VER_PATCH_MASK 0xff
-#define I40E_OEM_VER_BUILD_SHIFT 8
-#define I40E_OEM_VER_SHIFT 24
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK 0xffff
+#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
#define I40E_PHY_DEBUG_ALL \
(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
/* The values in here are decimal coded as hex as is the case in the NVM map */
-#define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x40
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
-/* magic for getting defines into strings */
-#define STRINGIFY(foo) #foo
-#define XSTRINGIFY(bar) STRINGIFY(bar)
-
-#define I40E_RX_DESC(R, i) \
+#define I40E_RX_DESC(R, i) \
(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
-#define I40E_TX_DESC(R, i) \
+#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
-#define I40E_TX_CTXTDESC(R, i) \
+#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define I40E_TX_FDIRDESC(R, i) \
+#define I40E_TX_FDIRDESC(R, i) \
(&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
/* default to trying for four seconds */
-#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
/**
* i40e_is_mac_710 - Return true if MAC is X710/XL710
@@ -199,9 +195,9 @@ struct i40e_lump_tracking {
#define I40E_FDIR_BUFFER_HEAD_ROOM 32
#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
-#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
-#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
-#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -387,8 +383,8 @@ struct i40e_pf {
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
u16 lan_veb; /* initial relay, if exists */
-#define I40E_NO_VEB 0xffff
-#define I40E_NO_VSI 0xffff
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
u16 next_vsi; /* Next unallocated VSI - 0-based! */
struct i40e_vsi **vsi;
struct i40e_veb *veb[I40E_MAX_VEB];
@@ -423,8 +419,8 @@ struct i40e_pf {
*/
u16 dcbx_cap;
- u32 fcoe_hmc_filt_num;
- u32 fcoe_hmc_cntx_num;
+ u32 fcoe_hmc_filt_num;
+ u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
struct ptp_clock *ptp_clock;
@@ -470,10 +466,10 @@ struct i40e_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
+ u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
- u16 stats_idx; /* index of VEB parent */
+ u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
u16 flags;
@@ -534,12 +530,13 @@ struct i40e_vsi {
u32 promisc_threshold;
u16 work_limit;
- u16 int_rate_limit; /* value in usecs */
+ u16 int_rate_limit; /* value in usecs */
+
+ u16 rss_table_size; /* HW RSS table size */
+ u16 rss_size; /* Allocated RSS queues */
+ u8 *rss_hkey_user; /* User configured hash keys */
+ u8 *rss_lut_user; /* User configured lookup table entries */
- u16 rss_table_size; /* HW RSS table size */
- u16 rss_size; /* Allocated RSS queues */
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
u16 rx_buf_len;
@@ -550,14 +547,14 @@ struct i40e_vsi {
int base_vector;
bool irqs_ready;
- u16 seid; /* HW index of this VSI (absolute index) */
- u16 id; /* VSI number */
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
u16 uplink_seid;
- u16 base_queue; /* vsi's first queue in hw array */
- u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
- u16 req_queue_pairs; /* User requested queue pairs */
- u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 req_queue_pairs; /* User requested queue pairs */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
s16 vf_id; /* Virtual function ID for SRIOV VSIs */
@@ -576,19 +573,16 @@ struct i40e_vsi {
/* TC BW limit max quanta within VSI */
u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
- struct i40e_pf *back; /* Backreference to associated PF */
- u16 idx; /* index in pf->vsi[] */
- u16 veb_idx; /* index of VEB parent */
- struct kobject *kobj; /* sysfs object */
- bool current_isup; /* Sync 'link up' logging */
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+ bool current_isup; /* Sync 'link up' logging */
void *priv; /* client driver data reference. */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
-
- /* current rxnfc data */
- struct ethtool_rxnfc rxnfc; /* current rss hash opts */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -707,6 +701,8 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -714,8 +710,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 11cf1a5ebccf..67e396b2b347 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -204,6 +204,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
@@ -450,13 +453,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -471,17 +476,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -1582,6 +1589,24 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 618f18436618..250db0b244b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -148,6 +148,11 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
"Cannot locate client instance virtual channel receive routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
+ continue;
+ }
cdev->client->ops->virtchnl_receive(&cdev->lan_info,
cdev->client,
vf_id, msg, len);
@@ -181,6 +186,11 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ continue;
+ }
cdev->lan_info.params = params;
cdev->client->ops->l2_param_change(&cdev->lan_info,
cdev->client,
@@ -306,6 +316,11 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
"Cannot locate client instance VF reset routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
+ continue;
+ }
cdev->client->ops->vf_reset(&cdev->lan_info,
cdev->client, vf_id);
}
@@ -336,6 +351,11 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
"Cannot locate client instance VF enable routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
+ continue;
+ }
cdev->client->ops->vf_enable(&cdev->lan_info,
cdev->client, num_vfs);
}
@@ -370,6 +390,11 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
"Cannot locate client instance VF capability routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
+ continue;
+ }
capable = cdev->client->ops->vf_capable(&cdev->lan_info,
cdev->client,
vf_id);
@@ -559,6 +584,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
pf->hw.bus.device, pf->hw.bus.func);
}
+ mutex_lock(&i40e_client_instance_mutex);
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
if (client->ops && client->ops->open)
@@ -568,10 +594,12 @@ void i40e_client_subtask(struct i40e_pf *pf)
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
} else {
/* remove client instance */
+ mutex_unlock(&i40e_client_instance_mutex);
i40e_client_del_instance(pf, client);
atomic_dec(&client->ref_cnt);
continue;
}
+ mutex_unlock(&i40e_client_instance_mutex);
}
mutex_unlock(&i40e_client_mutex);
}
@@ -654,7 +682,7 @@ int i40e_lan_del_device(struct i40e_pf *pf)
static int i40e_client_release(struct i40e_client *client)
{
struct i40e_client_instance *cdev, *tmp;
- struct i40e_pf *pf = NULL;
+ struct i40e_pf *pf;
int ret = 0;
LIST_HEAD(cdevs_tmp);
@@ -664,12 +692,12 @@ static int i40e_client_release(struct i40e_client *client)
if (strncmp(cdev->client->name, client->name,
I40E_CLIENT_STR_LENGTH))
continue;
+ pf = (struct i40e_pf *)cdev->lan_info.pf;
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (atomic_read(&cdev->ref_cnt) > 0) {
ret = I40E_ERR_NOT_READY;
goto out;
}
- pf = (struct i40e_pf *)cdev->lan_info.pf;
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
false);
@@ -681,8 +709,7 @@ static int i40e_client_release(struct i40e_client *client)
client->name, pf->hw.pf_id);
}
/* delete the client instance from the list */
- list_del(&cdev->list);
- list_add(&cdev->list, &cdevs_tmp);
+ list_move(&cdev->list, &cdevs_tmp);
atomic_dec(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
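list_move() collapses the list_del()/list_add() pair the old code spelled out; its definition in <linux/list.h> is essentially:

	static inline void list_move(struct list_head *list, struct list_head *head)
	{
		__list_del_entry(list);
		list_add(list, head);
	}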
@@ -811,7 +838,8 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
wr32(hw, I40E_PFINT_AEQCTL, reg);
}
}
-
+	/* mitigate sync problems with the iWARP VF driver */
+ i40e_flush(hw);
return 0;
err:
kfree(ldev->qvlist_info);
@@ -1009,7 +1037,6 @@ int i40e_unregister_client(struct i40e_client *client)
if (!i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
- mutex_unlock(&i40e_client_mutex);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index a4601d97fb24..38a6c36a6a0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -36,9 +36,9 @@
#define I40E_CLIENT_VERSION_MINOR 01
#define I40E_CLIENT_VERSION_BUILD 00
#define I40E_CLIENT_VERSION_STR \
- XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
- XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
- XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
+ __stringify(I40E_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40E_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40E_CLIENT_VERSION_BUILD)
struct i40e_client_version {
u8 major;
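__stringify() from <linux/stringify.h> replaces the driver-local STRINGIFY/XSTRINGIFY pair removed from i40e.h above; the double expansion is what allows macro arguments to be expanded before stringification:

	/* include/linux/stringify.h */
	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)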
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 05cf9a719bab..0c1875b5b16d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1054,6 +1054,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_dcbx_config *r_cfg =
&pf->hw.remote_dcbx_config;
int i, ret;
+ u32 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
@@ -1063,8 +1064,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ vsi = pf->vsi[pf->lan_vsi];
+ switch_id =
+ vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+
ret = i40e_aq_query_port_ets_config(&pf->hw,
- pf->mac_seid,
+ switch_id,
bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1425,84 +1430,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
kfree(desc);
desc = NULL;
- } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
- (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_filter fd_data;
- u16 packet_len, i, j = 0;
- char *asc_packet;
- u8 *raw_packet;
- bool add = false;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
-
- if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
- if (!asc_packet)
- goto command_write_done;
-
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
-
- if (!raw_packet) {
- kfree(asc_packet);
- asc_packet = NULL;
- goto command_write_done;
- }
-
- cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
- &fd_data.q_index,
- &fd_data.flex_off, &fd_data.pctype,
- &fd_data.dest_vsi, &fd_data.dest_ctl,
- &fd_data.fd_status, &fd_data.cnt_index,
- &fd_data.fd_id, &packet_len, asc_packet);
- if (cnt != 10) {
- dev_info(&pf->pdev->dev,
- "program fd_filter: bad command string, cnt=%d\n",
- cnt);
- kfree(asc_packet);
- asc_packet = NULL;
- kfree(raw_packet);
- goto command_write_done;
- }
-
- /* fix packet length if user entered 0 */
- if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
-
- /* make sure to check the max as well */
- packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
-
- for (i = 0; i < packet_len; i++) {
- cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
- if (!cnt)
- break;
- j += 3;
- }
- dev_info(&pf->pdev->dev, "FD raw packet dump\n");
- print_hex_dump(KERN_INFO, "FD raw packet: ",
- DUMP_PREFIX_OFFSET, 16, 1,
- raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
- if (!ret) {
- dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
- } else {
- dev_info(&pf->pdev->dev,
- "Filter command send failed %d\n", ret);
- }
- kfree(raw_packet);
- raw_packet = NULL;
- kfree(asc_packet);
- asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
@@ -1727,8 +1654,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " globr\n");
dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
- dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c912e041d102..92bc8846f1ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1560,13 +1560,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
#endif
for (i = 0; i < vsi->num_queue_pairs; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
@@ -1581,16 +1581,16 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_packets", i);
+ "veb.tc_%d_tx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_bytes", i);
+ "veb.tc_%d_tx_bytes", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_packets", i);
+ "veb.tc_%d_rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_bytes", i);
+ "veb.tc_%d_rx_bytes", i);
p += ETH_GSTRING_LEN;
}
}
@@ -1601,23 +1601,23 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
+ "port.tx_priority_%d_xon", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
+ "port.tx_priority_%d_xoff", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
+ "port.rx_priority_%d_xon", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
+ "port.rx_priority_%d_xoff", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
+ "port.rx_priority_%d_xon_2_xoff", i);
p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
@@ -1970,11 +1970,22 @@ static int i40e_set_phys_id(struct net_device *netdev,
* 125us (8000 interrupts per second) == ITR(62)
*/
+/**
+ * __i40e_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue settings for interrupt coalescing. Specifically, Rx and
+ * Tx usecs are tracked per queue. If queue is < 0, queue 0 is used as the
+ * representative value.
+ **/
static int __i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *rx_ring, *tx_ring;
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
@@ -1989,14 +2000,18 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting))
+ rx_ring = vsi->rx_rings[queue];
+ tx_ring = vsi->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
@@ -2010,18 +2025,44 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_get_coalesce - get a netdev's coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ *
+ * Gets the coalesce settings for a particular netdev. Note that if the user
+ * has modified per-queue settings, this only guarantees to represent queue 0.
+ * See __i40e_get_coalesce for more details.
+ **/
static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
+ * @netdev: netdev structure
+ * @ec: ethtool's coalesce settings
+ * @queue: the particular queue to read
+ *
+ * Will read a specific queue's coalesce settings
+ **/
static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, queue);
}
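The two thin wrappers above funnel both ethtool entry points through one helper, using queue == -1 as the "whole netdev" sentinel that falls back to queue 0. A hedged userspace sketch of the same shape, with made-up names and values:

#include <stdio.h>

static int get_usecs(int queue)
{
	/* a negative queue means "report queue 0 as representative" */
	if (queue < 0)
		queue = 0;
	return 50 + queue;	/* placeholder per-queue value */
}

static int get_all(void)  { return get_usecs(-1); }
static int get_one(int q) { return get_usecs(q); }

int main(void)
{
	printf("all=%d queue2=%d\n", get_all(), get_one(2));
	return 0;
}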
+/**
+ * i40e_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
@@ -2060,6 +2101,14 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
i40e_flush(hw);
}
+/**
+ * __i40e_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
static int __i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
@@ -2120,12 +2169,27 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_set_coalesce - set coalesce settings for every queue on the netdev
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ *
+ * This will set each queue to the same coalesce settings.
+ **/
static int i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_set_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the specified queue's coalesce settings.
+ **/
static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
@@ -2141,41 +2205,72 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
**/
static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
{
+ struct i40e_hw *hw = &pf->hw;
+ u8 flow_pctype = 0;
+ u64 i_set = 0;
+
cmd->data = 0;
- if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
- cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
- cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
- return 0;
- }
- /* Report default options for RSS on i40e */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ break;
case UDP_V4_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through to add IP fields */
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ break;
+ case TCP_V6_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ break;
+ case UDP_V6_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through to add IP fields */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
+ /* Default is src/dest for IP, no matter the L4 hashing */
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
+ /* Read flow based hash input set register */
+ if (flow_pctype) {
+ i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+ flow_pctype)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+ flow_pctype)) << 32);
+ }
+
+ /* Process bits of hash input set */
+ if (i_set) {
+ if (i_set & I40E_L4_SRC_MASK)
+ cmd->data |= RXH_L4_B_0_1;
+ if (i_set & I40E_L4_DST_MASK)
+ cmd->data |= RXH_L4_B_2_3;
+
+ if (cmd->flow_type == TCP_V4_FLOW ||
+ cmd->flow_type == UDP_V4_FLOW) {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else if (cmd->flow_type == TCP_V6_FLOW ||
+ cmd->flow_type == UDP_V6_FLOW) {
+ if (i_set & I40E_L3_V6_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_V6_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
+ }
+
return 0;
}
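The register pairing in this hunk is a recurring i40e idiom: the 64-bit hash input set lives in two 32-bit register halves, INSET(0) low and INSET(1) high, which are OR-combined before the mask bits are tested. A small self-contained sketch (the bit positions and register values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define L4_SRC_MASK (1ULL << 34)	/* invented bit positions */
#define L4_DST_MASK (1ULL << 35)

int main(void)
{
	uint32_t lo = 0x0000f00du, hi = 0x0000000cu; /* fake register reads */
	uint64_t i_set = (uint64_t)lo | ((uint64_t)hi << 32);

	if (i_set & L4_SRC_MASK)
		printf("hash includes L4 source port\n");
	if (i_set & L4_DST_MASK)
		printf("hash includes L4 destination port\n");
	return 0;
}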
@@ -2318,6 +2413,51 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
/**
+ * i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @nfc: pointer to user request
+ * @i_setc: bits currently set
+ *
+ * Returns value of bits to be set per user request
+ **/
+static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+{
+ u64 i_set = i_setc;
+ u64 src_l3 = 0, dst_l3 = 0;
+
+ if (nfc->data & RXH_L4_B_0_1)
+ i_set |= I40E_L4_SRC_MASK;
+ else
+ i_set &= ~I40E_L4_SRC_MASK;
+ if (nfc->data & RXH_L4_B_2_3)
+ i_set |= I40E_L4_DST_MASK;
+ else
+ i_set &= ~I40E_L4_DST_MASK;
+
+ if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
+ src_l3 = I40E_L3_V6_SRC_MASK;
+ dst_l3 = I40E_L3_V6_DST_MASK;
+ } else if (nfc->flow_type == TCP_V4_FLOW ||
+ nfc->flow_type == UDP_V4_FLOW) {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ } else {
+ /* Any other flow types are not supported here */
+ return i_set;
+ }
+
+ if (nfc->data & RXH_IP_SRC)
+ i_set |= src_l3;
+ else
+ i_set &= ~src_l3;
+ if (nfc->data & RXH_IP_DST)
+ i_set |= dst_l3;
+ else
+ i_set &= ~dst_l3;
+
+ return i_set;
+}
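i40e_get_rss_hash_bits is a straight set-or-clear over paired source/destination masks: each requested bit in nfc->data either ORs a mask in or ANDs it out, leaving the other bits untouched. The same pattern in a runnable sketch (mask values invented for the example):

#include <stdint.h>
#include <stdio.h>

#define SRC_MASK 0x00f0ULL
#define DST_MASK 0x000fULL

static uint64_t apply(uint64_t cur, int want_src, int want_dst)
{
	cur = want_src ? (cur | SRC_MASK) : (cur & ~SRC_MASK);
	cur = want_dst ? (cur | DST_MASK) : (cur & ~DST_MASK);
	return cur;
}

int main(void)
{
	/* keep upper bits, force SRC on and DST off: prints 0xfff0 */
	printf("0x%llx\n", (unsigned long long)apply(0xff00, 1, 0));
	return 0;
}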
+
+/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
* @cmd: ethtool rxnfc command
@@ -2329,6 +2469,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+ u8 flow_pctype = 0;
+ u64 i_set, i_setc;
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -2337,75 +2479,39 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
-
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
break;
case TCP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
break;
case UDP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2437,13 +2543,23 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
+ if (flow_pctype) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+ flow_pctype)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+ flow_pctype)) << 32);
+ i_set = i40e_get_rss_hash_bits(nfc, i_setc);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_pctype);
+ }
+
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
i40e_flush(hw);
- /* Save setting for future output/update */
- pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
-
return 0;
}
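Writing the value back is the mirror image of the read path: the 64-bit input set is split into low and high u32 halves, one per INSET register. A sketch with a printf standing in for the MMIO write:

#include <stdint.h>
#include <stdio.h>

static void write_reg(int idx, uint32_t val)	/* stand-in for wr32/MMIO */
{
	printf("reg[%d] <= 0x%08x\n", idx, val);
}

int main(void)
{
	uint64_t i_set = 0x0000000c0000f00dULL;

	write_reg(0, (uint32_t)i_set);		/* low 32 bits */
	write_reg(1, (uint32_t)(i_set >> 32));	/* high 32 bits */
	return 0;
}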
@@ -2744,11 +2860,15 @@ static void i40e_get_channels(struct net_device *dev,
static int i40e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
+ const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int count = ch->combined_count;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
int new_count;
+ int err = 0;
/* We do not support setting channels for any other VSI at present */
if (vsi->type != I40E_VSI_MAIN)
@@ -2766,6 +2886,26 @@ static int i40e_set_channels(struct net_device *dev,
if (count > i40e_max_channels(vsi))
return -EINVAL;
+ /* verify that the number of channels does not invalidate any current
+ * flow director rules
+ */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (rule->dest_ctl != drop && count <= rule->q_index) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d assigns flow to queue %d\n",
+ rule->fd_id, rule->q_index);
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Existing filter rules must be deleted to reduce combined channel count to %d\n",
+ count);
+ return err;
+ }
+
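The validation loop above walks every flow director rule before committing the channel change, collecting errors rather than bailing on the first hit so each offending filter gets its own warning. A userspace sketch of the same scan (a plain array instead of an hlist, names invented):

#include <stdio.h>

struct rule { int fd_id; int q_index; };

int main(void)
{
	struct rule rules[] = { { 1, 0 }, { 2, 5 }, { 3, 12 } };
	int count = 8, err = 0;	/* proposed new channel count */
	unsigned int i;

	/* reject the resize if any rule targets a queue we would drop */
	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		if (rules[i].q_index >= count) {
			printf("filter %d uses queue %d\n",
			       rules[i].fd_id, rules[i].q_index);
			err = -1;
		}
	}
	return err ? 1 : 0;
}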
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */
@@ -2846,15 +2986,13 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
u8 *seed = NULL;
u16 i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
- return 0;
-
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
@@ -2872,8 +3010,12 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ if (indir)
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ else
+ i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
+ vsi->rss_size);
return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
I40E_HLUT_ARRAY_SIZE);
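i40e_fill_rss_lut, used here for the no-indirection-table case, simply spreads lookup-table entries across the active queues round-robin. A tiny runnable equivalent (table size shrunk for the example):

#include <stdio.h>

int main(void)
{
	unsigned char lut[16];	/* stand-in for the full i40e LUT */
	unsigned int i, rss_size = 6;

	/* spread table entries across the active queues round-robin */
	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % rss_size;

	for (i = 0; i < sizeof(lut); i++)
		printf("%u ", lut[i]);
	printf("\n");
	return 0;
}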
@@ -2943,6 +3085,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+
+ /* flush current ATR settings */
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d0b3a1bb82ca..ac1faee2a5b8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -57,8 +57,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -527,6 +525,7 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
pf->veb[i]->stat_offsets_loaded = false;
}
}
+ pf->hw_csum_rx_error = 0;
}
/**
@@ -1316,7 +1315,7 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
/* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
- I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
@@ -1909,7 +1908,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
if (f->vlan == I40E_VLAN_ANY) {
del_list[num_del].vlan_tag = 0;
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
} else {
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan));
@@ -4616,7 +4615,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u8 i, enabled_tc;
+ u8 i, enabled_tc = 1;
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
@@ -4634,8 +4633,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
else
return 1; /* Only TC0 */
- /* At least have TC0 */
- enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i))
num_tc++;
@@ -5245,7 +5242,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
@@ -5942,13 +5939,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR */
+
+ /* Wait for some more space to be available to turn on ATR. We also
+ * must check that no existing ntuple rules for TCP are in effect
+ */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->fd_tcp_rule == 0)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -5979,9 +5980,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int fd_room;
int reg;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
@@ -6001,7 +5999,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6021,7 +6019,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6055,9 +6053,6 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
@@ -7157,9 +7152,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
pf->pending_udp_bitmap &= ~BIT_ULL(i);
port = pf->udp_ports[i].index;
if (port)
- ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
- pf->udp_ports[i].type,
- NULL, NULL);
+ ret = i40e_aq_add_udp_tunnel(hw, port,
+ pf->udp_ports[i].type,
+ NULL, NULL);
else
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -7646,7 +7641,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
} else {
pf->num_fdsb_msix = 0;
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -7666,6 +7660,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ iwarp_requested = pf->num_iwarp_msix;
+
if (!vectors_left)
pf->num_iwarp_msix = 0;
else if (vectors_left < pf->num_iwarp_msix)
@@ -7679,18 +7675,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
- /* if we're short on vectors for what's desired, we limit
- * the queues per vmdq. If this is still more than are
- * available, the user will need to change the number of
- * queues/vectors used by the PF later with the ethtool
- * channels command
- */
- if (vmdq_vecs < vmdq_vecs_wanted)
- pf->num_vmdq_qps = 1;
- pf->num_vmdq_msix = pf->num_vmdq_qps;
+ if (!vectors_left) {
+ pf->num_vmdq_msix = 0;
+ pf->num_vmdq_qps = 0;
+ } else {
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vmdq_vecs < vmdq_vecs_wanted)
+ pf->num_vmdq_qps = 1;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
- v_budget += vmdq_vecs;
- vectors_left -= vmdq_vecs;
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
}
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
@@ -7702,21 +7703,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->msix_entries[i].entry = i;
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
- if (v_actual != v_budget) {
- /* If we have limited resources, we will start with no vectors
- * for the special features and then allocate vectors to some
- * of these features based on the policy and at the end disable
- * the features that did not get any vectors.
- */
- iwarp_requested = pf->num_iwarp_msix;
- pf->num_iwarp_msix = 0;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
- pf->num_fcoe_msix = 0;
-#endif
- pf->num_vmdq_msix = 0;
- }
-
if (v_actual < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -7730,9 +7716,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (v_actual != v_budget) {
+ } else if (!vectors_left) {
+ /* If we have limited resources, we will start with no vectors
+ * for the special features and then allocate vectors to some
+ * of these features based on the policy and at the end disable
+ * the features that did not get any vectors.
+ */
int vec;
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector limit reached, attempting to redistribute vectors\n");
/* reserve the misc vector */
vec = v_actual - 1;
@@ -7740,7 +7733,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1;
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+#ifdef I40E_FCOE
+ pf->num_fcoe_qps = 0;
+ pf->num_fcoe_msix = 0;
+#endif
/* partition out the remaining vectors */
switch (vec) {
@@ -7772,9 +7768,14 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->num_fdsb_msix = 1;
+ vec--;
+ }
pf->num_lan_msix = min_t(int,
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
+ pf->num_lan_qps = pf->num_lan_msix;
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7786,6 +7787,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->num_fdsb_msix == 0)) {
+ dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ }
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
(pf->num_vmdq_msix == 0)) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
@@ -7804,6 +7810,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
+ i40e_debug(&pf->hw, I40E_DEBUG_INIT,
+ "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
+ pf->num_lan_msix,
+ pf->num_vmdq_msix * pf->num_vmdq_vsis,
+ pf->num_fdsb_msix,
+ pf->num_iwarp_msix);
+
return v_actual;
}
@@ -7990,72 +8003,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
{
- struct i40e_aqc_get_set_rss_key_data rss_key;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- bool pf_lut = false;
- u8 *rss_lut;
- int ret, i;
-
- memcpy(&rss_key, seed, sizeof(rss_key));
-
- rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
- if (!rss_lut)
- return -ENOMEM;
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i < vsi->rss_table_size; i++)
- rss_lut[i] = i % vsi->rss_size;
+ int ret = 0;
- ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto config_rss_aq_out;
+ if (seed) {
+ struct i40e_aqc_get_set_rss_key_data *seed_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)seed;
+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
}
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
- if (vsi->type == I40E_VSI_MAIN)
- pf_lut = true;
-
- ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
- vsi->rss_table_size);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
-config_rss_aq_out:
- kfree(rss_lut);
- return ret;
-}
-
-/**
- * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
- * @vsi: VSI structure
- **/
-static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
-{
- u8 seed[I40E_HKEY_ARRAY_SIZE];
- struct i40e_pf *pf = vsi->back;
- u8 *lut;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
- return 0;
-
- lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
- vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
- ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
- kfree(lut);
-
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
return ret;
}
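After this rewrite, i40e_config_rss_aq treats both parameters as optional: a NULL seed or lut means "leave that half of the RSS configuration untouched". A minimal sketch of the calling convention (printfs stand in for the admin-queue calls; names are illustrative):

#include <stddef.h>
#include <stdio.h>

/* program only what the caller supplied; NULL means "leave unchanged" */
static int config_rss(const unsigned char *seed, const unsigned char *lut,
		      size_t lut_size)
{
	if (seed)
		printf("programming hash key\n");
	if (lut)
		printf("programming %zu-entry LUT\n", lut_size);
	return 0;
}

int main(void)
{
	unsigned char lut[4] = { 0, 1, 0, 1 };

	config_rss(NULL, lut, sizeof(lut));	/* update LUT, keep key */
	return 0;
}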
@@ -8106,6 +8081,46 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_pf *pf = vsi->back;
+ u8 *lut;
+ int ret;
+
+ if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
+ return 0;
+
+ if (!vsi->rss_size)
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
+
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+ /* Use the user-configured hash key and lookup table if present,
+ * otherwise fall back to the defaults
+ */
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+
+ return ret;
+}
+
+/**
* i40e_config_rss_reg - Configure RSS keys and lut by writing registers
* @vsi: Pointer to vsi structure
* @seed: RSS hash seed
@@ -8243,8 +8258,8 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
* @rss_table_size: Lookup table size
* @rss_size: Range of queue number for hashing
*/
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size)
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
{
u16 i;
@@ -8285,6 +8300,8 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -8609,7 +8626,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_100M_SGMII_CAPABLE |
I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
@@ -8684,18 +8700,40 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ }
}
return need_reset;
}
/**
+ * i40e_clear_rss_lut - clear the rx hash lookup table
+ * @vsi: the VSI being configured
+ **/
+static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
+ u8 i;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), 0);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -8708,6 +8746,12 @@ static int i40e_set_features(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
bool need_reset;
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ i40e_pf_config_rss(pf);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ i40e_clear_rss_lut(vsi);
+
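The RXHASH handling added here is edge-triggered: it compares the requested feature set against netdev->features and acts only on the transition, not on every call. A generic sketch of that detection (flag value arbitrary):

#include <stdio.h>

#define F_RXHASH 0x1u

static void on_toggle(unsigned int old, unsigned int new)
{
	if ((new & F_RXHASH) && !(old & F_RXHASH))
		printf("RXHASH turned on: reprogram RSS\n");
	else if (!(new & F_RXHASH) && (old & F_RXHASH))
		printf("RXHASH turned off: clear the LUT\n");
}

int main(void)
{
	on_toggle(0, F_RXHASH);
	on_toggle(F_RXHASH, 0);
	return 0;
}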
if (features & NETIF_F_HW_VLAN_CTAG_RX)
i40e_vlan_stripping_enable(vsi);
else
@@ -11309,11 +11353,7 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown the adminq */
- ret_code = i40e_shutdown_adminq(hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the Admin Queue resources: %d\n",
- ret_code);
+ i40e_shutdown_adminq(hw);
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
@@ -11360,6 +11400,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+ if (!pf) {
+ dev_info(&pdev->dev,
+ "Cannot recover - error happened during device probe\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
rtnl_lock();
@@ -11582,7 +11628,8 @@ static int __init i40e_init_module(void)
* it can't be any worse than using the system workqueue which
* was already single threaded
*/
- i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+ i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
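For reference, a minimal module sketch of the alloc_workqueue() call the driver switches to: max_active of 1 preserves the old single-threaded ordering, while WQ_MEM_RECLAIM guarantees a rescuer thread. The queue name and module boilerplate below are placeholders, not driver code:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* ordered (max_active = 1), unbound, safe on the reclaim path */
	example_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				     "example_wq");
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");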
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ed39cbad24bd..f1feceab758a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -669,7 +669,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
pf->ptp_clock = NULL;
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
- } else {
+ } else if (pf->ptp_clock) {
struct timespec64 ts;
u32 regval;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index df7ecc9578c9..6287bf63c43c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -40,6 +40,69 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_fdir - Generate a Flow Director descriptor based on fdata
+ * @tx_ring: Tx ring to send buffer on
+ * @fdata: Flow director filter data
+ * @add: Indicate if we are adding a rule or deleting one
+ *
+ **/
+static void i40e_fdir(struct i40e_ring *tx_ring,
+ struct i40e_fdir_filter *fdata, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ u32 flex_ptype, dtype_cmd;
+ u16 i;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
+ (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
+ (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ /* Use LAN VSI Id if not programmed by user */
+ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
+ ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= add ?
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT :
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
+ (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
+ (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+
+ if (fdata->cnt_index) {
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
+ ((u32)fdata->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ }
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
+}
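The descriptor builder above leans on one idiom throughout: mask & (value << shift), with the mask applied after the shift so an out-of-range value can never corrupt a neighbouring field. A standalone illustration (field widths invented for the example):

#include <stdint.h>
#include <stdio.h>

#define QINDEX_SHIFT 0
#define QINDEX_MASK  (0x7ffu << QINDEX_SHIFT)
#define PCTYPE_SHIFT 17
#define PCTYPE_MASK  (0x3fu << PCTYPE_SHIFT)

int main(void)
{
	uint32_t q_index = 5, pctype = 33, word;

	/* mask-first form keeps each field inside its own bit range */
	word  = QINDEX_MASK & (q_index << QINDEX_SHIFT);
	word |= PCTYPE_MASK & (pctype << PCTYPE_SHIFT);
	printf("0x%08x\n", word);	/* prints 0x00420005 */
	return 0;
}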
+
#define I40E_FD_CLEAN_DELAY 10
/**
* i40e_program_fdir_filter - Program a Flow Director filter
@@ -48,14 +111,13 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* @pf: The PF pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add)
+static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
+ u8 *raw_packet, struct i40e_pf *pf,
+ bool add)
{
- struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf, *first;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
- unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -92,56 +154,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* grab the next descriptor */
i = tx_ring->next_to_use;
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
first = &tx_ring->tx_bi[i];
- memset(first, 0, sizeof(struct i40e_tx_buffer));
-
- tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
-
- fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
-
- fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
- I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
-
- fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
- I40E_TXD_FLTR_QW0_PCTYPE_MASK;
-
- /* Use LAN VSI Id if not programmed by user */
- if (fdir_data->dest_vsi == 0)
- fpt |= (pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
- else
- fpt |= ((u32)fdir_data->dest_vsi <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
- I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
-
- dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- if (add)
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- else
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
-
- dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
- I40E_TXD_FLTR_QW1_DEST_MASK;
-
- dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
- I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
-
- if (fdir_data->cnt_index != 0) {
- dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dcc |= ((u32)fdir_data->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- }
-
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
- fdir_desc->rsvd = cpu_to_le32(0);
- fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
- fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+ i40e_fdir(tx_ring, fdir_data, add);
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
@@ -282,18 +296,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
@@ -532,7 +546,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -545,9 +562,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -584,8 +598,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -754,8 +767,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1864,6 +1877,15 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->rx_rings[idx]->rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->tx_rings[idx]->tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1879,6 +1901,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1887,18 +1910,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -2621,9 +2647,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -2654,8 +2678,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
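The one-character change from !--nr_frags to !nr_frags-- above is easy to misread: pre-decrement tests the new value and stops a pass early, while post-decrement tests the old value, so the loop body runs once more — matching the updated comment that every group must now be validated. A compact demonstration:

#include <stdio.h>

int main(void)
{
	int nr, iters;

	/* pre-decrement: exits as soon as the counter *becomes* zero */
	for (nr = 2, iters = 0;; iters++)
		if (!--nr)
			break;
	printf("pre-decrement body ran %d times\n", iters);	/* 1 */

	/* post-decrement: tests the old value, so one extra pass */
	for (nr = 2, iters = 0;; iters++)
		if (!nr--)
			break;
	printf("post-decrement body ran %d times\n", iters);	/* 2 */

	return 0;
}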
@@ -2787,9 +2810,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2814,13 +2835,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
@@ -2840,10 +2859,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_QW1_CMD_SHIFT);
/* notify HW of packet */
- if (!tail_bump)
+ if (!tail_bump) {
prefetchw(tx_desc + 1);
-
- if (tail_bump) {
+ } else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2852,7 +2870,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
wmb();
writel(i, tx_ring->tail);
}
-
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b78c810d1835..508840585645 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index c92a3bdee229..f861d3109d1a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -163,6 +163,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 6fcbf764f32b..54b8ee2583f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -502,8 +502,16 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u32 qtx_ctl;
int ret = 0;
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ ret = -ENOENT;
+ goto error_context;
+ }
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_context;
+ }
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -991,7 +999,10 @@ complete_reset:
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
- i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ /* Do not notify the client during VF init */
+ if (vf->pf->num_alloc_vfs)
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ vf->num_vlan = 0;
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -1089,7 +1100,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
goto err_iov;
}
}
- i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
/* allocate memory */
vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
@@ -1113,6 +1123,8 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
}
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
+
err_alloc:
if (ret)
i40e_free_vfs(pf);
@@ -1472,7 +1484,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ !vsi) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2213,8 +2226,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP :
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
aq_ret);
}
@@ -2314,6 +2327,7 @@ err:
/* send the response back to the VF */
aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
return aq_ret;
}
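The added kfree(vrh) closes a per-request leak: the reply buffer is heap-allocated and was never released after being handed to i40e_vc_send_msg_to_vf(), which copies it out. The general shape, as a hedged userspace sketch with invented names:

#include <stdlib.h>
#include <string.h>

static int handle_request(void)
{
	size_t len = 64;
	char *buf = malloc(len);
	int ret;

	if (!buf)
		return -1;
	memset(buf, 0, len);
	ret = 0;	/* stand-in for "send response from buf" */
	free(buf);	/* must happen on the same path that sends it */
	return ret;
}

int main(void)
{
	return handle_request();
}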
@@ -2742,11 +2756,12 @@ error_param:
* @vf_id: VF identifier
* @vlan_id: VLAN identifier
* @qos: priority setting
+ * @vlan_proto: vlan protocol
*
* program VF vlan id and/or qos
**/
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos)
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -2769,6 +2784,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ ret = -EPROTONOSUPPORT;
+ goto error_pvid;
+ }
+
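The new check compares the __be16 protocol in network byte order, so htons(ETH_P_8021Q) appears on both sides rather than converting the stored field itself. A minimal sketch:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100

int main(void)
{
	/* protocol fields travel big-endian: compare in network order */
	uint16_t vlan_proto = htons(ETH_P_8021Q);

	if (vlan_proto != htons(ETH_P_8021Q)) {
		fprintf(stderr, "VLAN protocol is not supported\n");
		return 1;
	}
	printf("802.1Q accepted\n");
	return 0;
}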
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -2995,6 +3016,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
ivi->spoofchk = vf->spoofchk;
+ ivi->trusted = vf->trusted;
ret = 0;
error_param:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 875174141451..4012d069939a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -129,8 +129,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 3114dcfa1724..40b0eafd0c71 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -204,6 +204,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
@@ -447,13 +450,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -468,17 +473,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -1579,6 +1586,24 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 4db0c0326185..7953c13451b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -302,7 +302,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = le16_to_cpu(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
u16 i = 0;
@@ -326,6 +325,8 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ u16 len = le16_to_cpu(aq_desc->datalen);
+
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index a579193b2c21..75f2a2cdd738 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,7 +51,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -64,9 +67,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1312,6 +1311,19 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->rx_rings[idx].rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->tx_rings[idx].tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1326,6 +1338,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
+ int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1334,18 +1348,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -1832,9 +1849,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -1865,8 +1880,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2015,9 +2029,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2042,13 +2054,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
@@ -2068,10 +2078,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_QW1_CMD_SHIFT);
/* notify HW of packet */
- if (!tail_bump)
+ if (!tail_bump) {
prefetchw(tx_desc + 1);
-
- if (tail_bump) {
+ } else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2080,7 +2089,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
wmb();
writel(i, tx_ring->tail);
}
-
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0112277e5882..abcdecabbc56 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -287,6 +287,14 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
@@ -445,4 +453,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
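txring_txq() folds the repeated netdev_get_tx_queue(ring->netdev, ring->queue_index) lookup into one helper, so the byte-queue-limit call sites in the hunks above all reduce to the same shape:

netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes);
netdev_tx_reset_queue(txring_txq(tx_ring));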
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index f04ce6cb70dc..bd691ad86673 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -160,6 +160,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 76ed97db28e2..c5fd724313c7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -59,32 +59,25 @@ struct i40e_vsi {
unsigned long state;
int base_vector;
u16 work_limit;
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware only supports 2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
u16 qs_handle;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define I40EVF_DEFAULT_TXD 512
-#define I40EVF_DEFAULT_RXD 512
-#define I40EVF_MAX_TXD 4096
-#define I40EVF_MIN_TXD 64
-#define I40EVF_MAX_RXD 4096
-#define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048 2048
-#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
-#define I40EVF_MAX_AQ_BUF_SIZE 4096
-#define I40EVF_AQ_LEN 32
-#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -111,7 +104,7 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
- int v_idx; /* vector index in list */
+ int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
cpumask_var_t affinity_mask;
@@ -129,11 +122,11 @@ struct i40e_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define I40EVF_RX_DESC_ADV(R, i) \
+#define I40EVF_RX_DESC_ADV(R, i) \
(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
-#define I40EVF_TX_DESC_ADV(R, i) \
+#define I40EVF_TX_DESC_ADV(R, i) \
(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
-#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
#define OTHER_VECTOR 1
@@ -204,22 +197,25 @@ struct i40evf_adapter {
struct msix_entry *msix_entries;
u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
-#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
-#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
-#define I40EVF_FLAG_RESET_PENDING BIT(9)
-#define I40EVF_FLAG_RESET_NEEDED BIT(10)
+#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40EVF_FLAG_IN_NETPOLL BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
+#define I40EVF_FLAG_RESET_PENDING BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED BIT(10)
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
#define I40EVF_FLAG_PROMISC_ON BIT(15)
#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
/* duplicates for common code */
-#define I40E_FLAG_FDIR_ATR_ENABLED 0
-#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
/* flags for admin queue service task */
@@ -233,7 +229,7 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
@@ -258,6 +254,7 @@ struct i40evf_adapter {
struct work_struct watchdog_task;
bool netdev_registered;
bool link_up;
+ enum i40e_aq_link_speed link_speed;
enum i40e_virtchnl_ops current_op;
#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index c9c202f6c521..a9940154eead 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -74,13 +74,33 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
static int i40evf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
- /* In the future the VF will be able to query the PF for
- * some information - for now use a dummy value
- */
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
ecmd->supported = 0;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = PORT_NONE;
+ /* Set speed and duplex */
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ break;
+ case I40E_LINK_SPEED_20GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case I40E_LINK_SPEED_100MB:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
return 0;
}
@@ -276,93 +296,207 @@ static int i40evf_set_ringparam(struct net_device *netdev,
}
/**
- * i40evf_get_coalesce - Get interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
+ * __i40evf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
*
- * Returns current coalescing settings. This is referred to elsewhere in the
- * driver as Interrupt Throttle Rate, as this is how the hardware describes
- * this functionality.
+ * Gets the per-queue coalescing settings. Specifically, Rx and Tx usecs
+ * are per-queue values. If queue is < 0 we default to queue 0 as the
+ * representative value.
**/
-static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int __i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_ring *rx_ring, *tx_ring;
ec->tx_max_coalesced_frames = vsi->work_limit;
ec->rx_max_coalesced_frames = vsi->work_limit;
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify
+ * a queue, return queue 0's values as representative.
+ */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= adapter->num_active_queues)
+ return -EINVAL;
+
+ rx_ring = &adapter->rx_rings[queue];
+ tx_ring = &adapter->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
/**
- * i40evf_set_coalesce - Set interrupt coalescing settings
+ * i40evf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
*
- * Change current coalescing settings.
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality. Note that if per-queue settings have been modified this
+ * only represents the settings of queue 0.
**/
-static int i40evf_set_coalesce(struct net_device *netdev,
+static int i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_hw *hw = &adapter->hw;
+ return __i40evf_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
+ * @netdev: netdev to read
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to read
+ *
+ * Read a specific queue's coalesce settings.
+ **/
+static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_get_coalesce(netdev, ec, queue);
+}
+
+/**
+ * i40evf_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
+ u16 vector;
+
+ adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
+ adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (ec->use_adaptive_rx_coalesce)
+ adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = adapter->rx_rings[queue].q_vector;
+ q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+
+ q_vector = adapter->tx_rings[queue].q_vector;
+ q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
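ITR_TO_REG strips the dynamic bit and converts the user's microsecond value to the 2 us register granularity noted in the i40e_ring comment; a sketch of the conversion macros, assuming they follow the PF driver's definitions:

#define ITR_TO_REG(setting)      (((setting) & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_REG_TO_USEC(itr_reg) ((itr_reg) << 1)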
+
+/**
+ * __i40evf_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-
- else
+ if (ec->rx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_rx_coalesce)
+ netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+ } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- else if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- else
+ if (ec->tx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_tx_coalesce)
+ netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+ } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if (ec->use_adaptive_rx_coalesce)
- vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = &adapter->q_vectors[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify
+ * a queue, apply to all queues.
+ */
+ if (queue < 0) {
+ for (i = 0; i < adapter->num_active_queues; i++)
+ i40evf_set_itr_per_queue(adapter, ec, i);
+ } else if (queue < adapter->num_active_queues) {
+ i40evf_set_itr_per_queue(adapter, ec, queue);
+ } else {
+ netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ adapter->num_active_queues - 1);
+ return -EINVAL;
}
return 0;
}
/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to modify
+ *
+ * Modifies a specific queue's coalesce settings.
+ **/
+static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, queue);
+}
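With both per-queue callbacks wired into i40evf_ethtool_ops below, an ethtool binary that supports the --per-queue sub-commands can address individual queues through a queue mask, for example:

ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50 tx-usecs 50
ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce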
+
+/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -513,6 +647,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 600fb9c4a7f0..14372810fc27 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -370,6 +370,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
+ struct i40e_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
@@ -377,7 +378,10 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
+ q_vector->ring_mask |= BIT(r_idx);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}
/**
@@ -391,6 +395,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
+ struct i40e_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
@@ -398,9 +403,10 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= BIT(t_idx);
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}
/**
@@ -1007,7 +1013,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
**/
-static int i40evf_up_complete(struct i40evf_adapter *adapter)
+static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
clear_bit(__I40E_DOWN, &adapter->vsi.state);
@@ -1016,7 +1022,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- return 0;
}
/**
@@ -1037,6 +1042,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
@@ -1154,6 +1160,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
+ tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1162,6 +1169,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
+ rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
return 0;
@@ -1420,7 +1428,9 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
int err;
+ rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
+ rtnl_unlock();
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to setup interrupt capabilities\n");
@@ -1729,6 +1739,7 @@ static void i40evf_reset_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
i40evf_free_traffic_irqs(adapter);
@@ -1767,6 +1778,7 @@ continue_reset:
if (netif_running(adapter->netdev)) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
}
i40evf_irq_disable(adapter);
@@ -1781,8 +1793,7 @@ continue_reset:
i40evf_free_all_tx_resources(adapter);
/* kill and reinit the admin queue */
- if (i40evf_shutdown_adminq(hw))
- dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
+ i40evf_shutdown_adminq(hw);
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
@@ -1802,6 +1813,8 @@ continue_reset:
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ /* Open RDMA Client again */
+ adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
@@ -1820,9 +1833,7 @@ continue_reset:
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto reset_err;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
} else {
@@ -2052,9 +2063,7 @@ static int i40evf_open(struct net_device *netdev)
i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto err_req_irq;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
@@ -2268,10 +2277,6 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2453,6 +2458,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
netif_carrier_off(netdev);
+ adapter->link_up = false;
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
@@ -2831,7 +2837,8 @@ static int __init i40evf_init_module(void)
pr_info("%s\n", i40evf_copyright);
- i40evf_wq = create_singlethread_workqueue(i40evf_driver_name);
+ i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+ i40evf_driver_name);
if (!i40evf_wq) {
pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index d76c221d4c8a..ddf478d6322b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -817,6 +817,45 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_print_link_message - print link up or down
+ * @adapter: adapter structure
+ *
+ * Log a message telling the world of our wondrous link status
+ **/
+static void i40evf_print_link_message(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ char *speed = "Unknown ";
+
+ if (!adapter->link_up) {
+ netdev_info(netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = "40 G";
+ break;
+ case I40E_LINK_SPEED_20GB:
+ speed = "20 G";
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = "10 G";
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = "1000 M";
+ break;
+ case I40E_LINK_SPEED_100MB:
+ speed = "100 M";
+ break;
+ default:
+ break;
+ }
+
+ netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
+}
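Both i40evf_get_settings() above and this helper switch over the same enum i40e_aq_link_speed. A hypothetical consolidation (not part of this patch) could map the AQ speed once and share the result:

/* hypothetical helper, shown only to illustrate the shared mapping */
static u32 i40evf_link_speed_mbps(enum i40e_aq_link_speed speed)
{
    switch (speed) {
    case I40E_LINK_SPEED_40GB:  return SPEED_40000;
    case I40E_LINK_SPEED_20GB:  return SPEED_20000;
    case I40E_LINK_SPEED_10GB:  return SPEED_10000;
    case I40E_LINK_SPEED_1GB:   return SPEED_1000;
    case I40E_LINK_SPEED_100MB: return SPEED_100;
    default:                    return SPEED_UNKNOWN;
    }
}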
+
+/**
* i40evf_request_reset
* @adapter: adapter structure
*
@@ -853,16 +892,20 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
(struct i40e_virtchnl_pf_event *)msg;
switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- adapter->link_up =
- vpe->event_data.link_event.link_status;
- if (adapter->link_up && !netif_carrier_ok(netdev)) {
- dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- } else if (!adapter->link_up) {
- dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
+ adapter->link_speed =
+ vpe->event_data.link_event.link_speed;
+ if (adapter->link_up !=
+ vpe->event_data.link_event.link_status) {
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+ i40evf_print_link_message(adapter);
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
@@ -937,8 +980,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
- netif_tx_start_all_queues(adapter->netdev);
- netif_carrier_on(adapter->netdev);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 199ff98209cf..acf06051e111 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -188,6 +188,11 @@ struct e1000_adv_tx_context_desc {
/* ETQF register bit definitions */
#define E1000_ETQF_FILTER_ENABLE BIT(26)
#define E1000_ETQF_1588 BIT(30)
+#define E1000_ETQF_IMM_INT BIT(29)
+#define E1000_ETQF_QUEUE_ENABLE BIT(31)
+#define E1000_ETQF_QUEUE_SHIFT 16
+#define E1000_ETQF_QUEUE_MASK 0x00070000
+#define E1000_ETQF_ETYPE_MASK 0x0000FFFF
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2997c443c5dc..2688180a7acd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1024,4 +1024,8 @@
#define E1000_RTTBCNRC_RF_INT_MASK \
(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4))
+#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define E1000_VLAPQF_QUEUE_MASK 0x03
+
#endif
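Each VLAN priority owns a 4-bit nibble in VLAPQF: bits 1:0 hold the target queue index and bit 3 is the valid flag, which is exactly what the two macros encode. A worked example (illustrative only) steering priority 5 to Rx queue 2:

u32 vlapqf = rd32(E1000_VLAPQF);

vlapqf |= E1000_VLAPQF_P_VALID(5);      /* valid flag: bit 3 + 5 * 4 = bit 23 */
vlapqf |= E1000_VLAPQF_QUEUE_SEL(5, 2); /* queue 2 into bits 21:20 */
wr32(E1000_VLAPQF, vlapqf);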
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 21d9d02885cb..d84afdd83e53 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -309,6 +309,7 @@
(0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
+#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5387b3a96489..d11093dce1b9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -350,11 +350,49 @@ struct hwmon_buff {
};
#endif
+/* The number of L2 ether-type filter registers; index 3 is reserved
+ * for the PTP 1588 timestamp.
+ */
+#define MAX_ETYPE_FILTER (4 - 1)
+/* ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters here!!
+ *
+ * Current filters: Filter 3
+ */
+#define IGB_ETQF_FILTER_1588 3
+
#define IGB_N_EXTTS 2
#define IGB_N_PEROUT 2
#define IGB_N_SDP 4
#define IGB_RETA_SIZE 128
+enum igb_filter_match_flags {
+ IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
+ IGB_FILTER_FLAG_VLAN_TCI = 0x2,
+};
+
+#define IGB_MAX_RXNFC_FILTERS 16
+
+/* RX network flow classification data structure */
+struct igb_nfc_input {
+ /* Byte layout in order, all values with MSB first:
+ * match_flags - 1 byte
+ * etype - 2 bytes
+ * vlan_tci - 2 bytes
+ */
+ u8 match_flags;
+ __be16 etype;
+ __be16 vlan_tci;
+};
+
+struct igb_nfc_filter {
+ struct hlist_node nfc_node;
+ struct igb_nfc_input filter;
+ u16 etype_reg_index;
+ u16 sw_idx;
+ u16 action;
+};
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -451,6 +489,7 @@ struct igb_adapter {
struct timecounter tc;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+ bool pps_sys_wrap_on;
struct ptp_pin_desc sdp_config[IGB_N_SDP];
struct {
@@ -473,6 +512,13 @@ struct igb_adapter {
int copper_tries;
struct e1000_info ei;
u16 eee_advert;
+
+ /* RX network flow classification support */
+ struct hlist_head nfc_filter_list;
+ unsigned int nfc_filter_count;
+ /* lock for RX network flow classification filter */
+ spinlock_t nfc_lock;
+ bool etype_bitmap[MAX_ETYPE_FILTER];
};
/* flags controlling PTP/1588 function */
@@ -599,4 +645,9 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+int igb_add_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input);
+int igb_erase_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input);
+
#endif /* _IGB_H_ */
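For illustration, here is how igb_add_ethtool_nfc_entry() (added in igb_ethtool.c below) would populate one of these nodes for an ethertype-only rule steering LLDP frames to Rx queue 1; the concrete values are hypothetical:

struct igb_nfc_filter lldp_rule = {
    .filter = {
        .match_flags = IGB_FILTER_FLAG_ETHER_TYPE,
        .etype       = htons(0x88CC),   /* LLDP, kept in network order */
    },
    .sw_idx = 0,    /* ethtool rule location */
    .action = 1,    /* target Rx queue */
};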
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 64e91c575a39..737b664d004c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2431,6 +2431,63 @@ static int igb_get_ts_info(struct net_device *dev,
}
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct igb_nfc_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = IGB_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ if (rule->filter.match_flags) {
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = rule->filter.etype;
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igb_nfc_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = IGB_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -2484,6 +2541,16 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->nfc_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = igb_get_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs);
+ break;
case ETHTOOL_GRXFH:
ret = igb_get_rss_hash_opts(adapter, cmd);
break;
@@ -2598,6 +2665,279 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
return 0;
}
+static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 i;
+ u32 etqf;
+ u16 etype;
+
+ /* find an empty etype filter register */
+ for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
+ if (!adapter->etype_bitmap[i])
+ break;
+ }
+ if (i == MAX_ETYPE_FILTER) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
+ return -EINVAL;
+ }
+
+ adapter->etype_bitmap[i] = true;
+
+ etqf = rd32(E1000_ETQF(i));
+ etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
+
+ etqf |= E1000_ETQF_FILTER_ENABLE;
+ etqf &= ~E1000_ETQF_ETYPE_MASK;
+ etqf |= (etype & E1000_ETQF_ETYPE_MASK);
+
+ etqf &= ~E1000_ETQF_QUEUE_MASK;
+ etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT)
+ & E1000_ETQF_QUEUE_MASK);
+ etqf |= E1000_ETQF_QUEUE_ENABLE;
+
+ wr32(E1000_ETQF(i), etqf);
+
+ input->etype_reg_index = i;
+
+ return 0;
+}
+
+static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u16 queue_index;
+ u32 vlapqf;
+
+ vlapqf = rd32(E1000_VLAPQF);
+ vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+ if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) &&
+ (queue_index != input->action)) {
+ dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
+ return -EEXIST;
+ }
+
+ vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority);
+ vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
+
+ wr32(E1000_VLAPQF, vlapqf);
+
+ return 0;
+}
+
+int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
+{
+ int err = -EINVAL;
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) {
+ err = igb_rxnfc_write_etype_filter(adapter, input);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI)
+ err = igb_rxnfc_write_vlan_prio_filter(adapter, input);
+
+ return err;
+}
+
+static void igb_clear_etype_filter_regs(struct igb_adapter *adapter,
+ u16 reg_index)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 etqf = rd32(E1000_ETQF(reg_index));
+
+ etqf &= ~E1000_ETQF_QUEUE_ENABLE;
+ etqf &= ~E1000_ETQF_QUEUE_MASK;
+ etqf &= ~E1000_ETQF_FILTER_ENABLE;
+
+ wr32(E1000_ETQF(reg_index), etqf);
+
+ adapter->etype_bitmap[reg_index] = false;
+}
+
+static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter,
+ u16 vlan_tci)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u32 vlapqf;
+
+ vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ vlapqf = rd32(E1000_VLAPQF);
+ vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority);
+ vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority,
+ E1000_VLAPQF_QUEUE_MASK);
+
+ wr32(E1000_VLAPQF, vlapqf);
+}
+
+int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
+{
+ if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE)
+ igb_clear_etype_filter_regs(adapter,
+ input->etype_reg_index);
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI)
+ igb_clear_vlan_prio_filter(adapter,
+ ntohs(input->filter.vlan_tci));
+
+ return 0;
+}
+
+static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input,
+ u16 sw_idx)
+{
+ struct igb_nfc_filter *rule, *parent;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->sw_idx == sw_idx)) {
+ if (!input)
+ err = igb_erase_filter(adapter, rule);
+
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ adapter->nfc_filter_count--;
+ }
+
+ /* If no input was given this was a delete; err is 0 if a rule was
+ * successfully found and removed from the list, else -EINVAL.
+ */
+ if (!input)
+ return err;
+
+ /* initialize node */
+ INIT_HLIST_NODE(&input->nfc_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&parent->nfc_node, &input->nfc_node);
+ else
+ hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
+
+ /* update counts */
+ adapter->nfc_filter_count++;
+
+ return 0;
+}
+
+static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct igb_nfc_filter *input, *rule;
+ int err = 0;
+
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ /* Don't allow programming if the action is a queue greater than
+ * or equal to the number of online Rx queues.
+ */
+ if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) ||
+ (fsp->ring_cookie >= adapter->num_rx_queues)) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= IGB_MAX_RXNFC_FILTERS) {
+ dev_err(&adapter->pdev->dev, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ return -EINVAL;
+
+ if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK))
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ input->filter.etype = fsp->h_u.ether_spec.h_proto;
+ input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE;
+ }
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ input->filter.vlan_tci = fsp->h_ext.vlan_tci;
+ input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
+ }
+
+ input->action = fsp->ring_cookie;
+ input->sw_idx = fsp->location;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&input->filter, &rule->filter,
+ sizeof(input->filter))) {
+ err = -EEXIST;
+ dev_err(&adapter->pdev->dev,
+ "ethtool: this filter is already set\n");
+ goto err_out_w_lock;
+ }
+ }
+
+ err = igb_add_filter(adapter, input);
+ if (err)
+ goto err_out_w_lock;
+
+ igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->nfc_lock);
+ return 0;
+
+err_out_w_lock:
+ spin_unlock(&adapter->nfc_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+ err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
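From userspace these hooks are driven through ethtool's rxnfc interface; assuming the standard rxnfc syntax, an ethertype rule is installed, listed, and removed with something like:

ethtool -N eth0 flow-type ether proto 0x88cc action 1 loc 0
ethtool -n eth0
ethtool -N eth0 delete 0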
+
static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct igb_adapter *adapter = netdev_priv(dev);
@@ -2607,6 +2947,11 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXFH:
ret = igb_set_rss_hash_opt(adapter, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = igb_add_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = igb_del_ethtool_nfc_entry(adapter, cmd);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 942a89fb0090..edc9a6ac5169 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -58,7 +58,7 @@
#include "igb.h"
#define MAJ 5
-#define MIN 3
+#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
@@ -169,13 +169,15 @@ static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos);
+ int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
+static void igb_nfc_filter_exit(struct igb_adapter *adapter);
+static void igb_nfc_filter_restore(struct igb_adapter *adapter);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
@@ -1611,6 +1613,7 @@ static void igb_configure(struct igb_adapter *adapter)
igb_setup_mrqc(adapter);
igb_setup_rctl(adapter);
+ igb_nfc_filter_restore(adapter);
igb_configure_tx(adapter);
igb_configure_rx(adapter);
@@ -2059,6 +2062,21 @@ static int igb_set_features(struct net_device *netdev,
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ struct hlist_node *node2;
+ struct igb_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &adapter->nfc_filter_list, nfc_node) {
+ igb_erase_filter(adapter, rule);
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ }
+ spin_unlock(&adapter->nfc_lock);
+ adapter->nfc_filter_count = 0;
+ }
+
netdev->features = features;
if (netif_running(netdev))
@@ -3053,6 +3071,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
@@ -3240,6 +3259,8 @@ static int __igb_close(struct net_device *netdev, bool suspending)
igb_down(adapter);
igb_free_irq(adapter);
+ igb_nfc_filter_exit(adapter);
+
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
@@ -6201,14 +6222,17 @@ static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
return 0;
}
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct igb_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
igb_disable_port_vlan(adapter, vf);
}
@@ -8306,4 +8330,28 @@ int igb_reinit_queues(struct igb_adapter *adapter)
return err;
}
+
+static void igb_nfc_filter_exit(struct igb_adapter *adapter)
+{
+ struct igb_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igb_erase_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+static void igb_nfc_filter_restore(struct igb_adapter *adapter)
+{
+ struct igb_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igb_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 336c103ae374..a7895c4cbcc3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -591,6 +591,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
tsim |= TSINTR_SYS_WRAP;
else
tsim &= ~TSINTR_SYS_WRAP;
+ igb->pps_sys_wrap_on = !!on;
wr32(E1000_TSIM, tsim);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
return 0;
@@ -998,12 +999,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
/* define ethertype filter for timestamped packets */
if (is_l2)
- wr32(E1000_ETQF(3),
+ wr32(E1000_ETQF(IGB_ETQF_FILTER_1588),
(E1000_ETQF_FILTER_ENABLE | /* enable filter */
E1000_ETQF_1588 | /* enable timestamping */
ETH_P_1588)); /* 1588 eth protocol type */
else
- wr32(E1000_ETQF(3), 0);
+ wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0);
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
@@ -1159,7 +1160,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
@@ -1235,7 +1236,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i211:
wr32(E1000_TSAUXC, 0x0);
wr32(E1000_TSSDP, 0x0);
- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
+ wr32(E1000_TSIM,
+ TSYNC_INTERRUPTS |
+ (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0));
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b0778ba65083..12bb877df860 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
#include "igbvf.h"
-#define DRV_VERSION "2.0.2-k"
+#define DRV_VERSION "2.4.0-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 9475ff9055aa..b06e32d0d22a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -45,10 +45,10 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
@@ -645,6 +645,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
+#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
@@ -653,13 +654,12 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
-#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
-#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
+#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
/* Tx fast path data */
@@ -673,6 +673,7 @@ struct ixgbe_adapter {
/* Port number used to identify VXLAN traffic */
__be16 vxlan_port;
+ __be16 geneve_port;
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -840,6 +841,7 @@ enum ixgbe_state_t {
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
+ __IXGBE_RESET_REQUESTED,
};
struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index c47b605e8651..77d3039283f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -99,6 +99,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
supported = true;
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0d7209eb5abf..f49f80380aa5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -193,7 +193,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+ SUPPORTED_1000baseKX_Full :
+ SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
SUPPORTED_1000baseKX_Full :
@@ -311,6 +313,25 @@ static int ixgbe_get_settings(struct net_device *netdev,
break;
}
+ /* Indicate pause support */
+ ecmd->supported |= SUPPORTED_Pause;
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ ecmd->advertising |= ADVERTISED_Pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ ecmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ break;
+ case ixgbe_fc_tx_pause:
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ break;
+ default:
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ }
+
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
@@ -2926,9 +2947,13 @@ static u32 ixgbe_rss_indir_size(struct net_device *netdev)
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
+ u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
for (i = 0; i < reta_size; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -3039,8 +3064,8 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
/* We only support one q_vector without MSI-X */
max_combined = 1;
} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- /* SR-IOV currently only allows one queue on the PF */
- max_combined = 1;
+ /* Limit value based on the queue mask */
+ max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
} else if (tcs > 1) {
/* For DCB report channels per traffic class */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index bcdc88444ceb..15ab337fd7ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -515,15 +515,16 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
+ if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
- /* 32 pool mode with 4 queues per pool */
+ /* 32 pool mode with up to 4 queues per pool */
} else {
vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
rss_m = IXGBE_RSS_4Q_MASK;
- rss_i = 4;
+ /* We can support 4, 2, or 1 queues */
+ rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
}
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b4f03748adc0..a244d9a67264 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -137,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
/* required last entry */
{0, }
@@ -1103,7 +1104,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
e_warn(drv, "initiating reset due to tx timeout\n");
ixgbe_service_event_schedule(adapter);
}
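flags2 is a plain u32 updated with read-modify-write, so setting IXGBE_FLAG2_RESET_REQUESTED from interrupt context could race with other flag updates; a bit in adapter->state is manipulated with atomic bitops instead. The consumer side then follows the usual subtask pattern, roughly (a sketch of the matching service-task check, assuming it uses test_and_clear_bit()):

static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
    if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
        return;
    /* ... rtnl_lock(); ixgbe_reinit_locked(adapter); rtnl_unlock(); ... */
}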
@@ -1495,7 +1496,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
- __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
bool encap_pkt = false;
skb_checksum_none_assert(skb);
@@ -1504,8 +1504,8 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
- if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
- (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
+ /* check for VXLAN and Geneve packets */
+ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
encap_pkt = true;
skb->encapsulation = 1;
}
@@ -2777,7 +2777,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
}
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
@@ -3007,7 +3007,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
@@ -3224,7 +3224,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
- e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+ hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
@@ -3248,7 +3248,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
else if (tcs > 1)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mtqc |= IXGBE_MTQC_32VF;
else
mtqc |= IXGBE_MTQC_64VF;
@@ -3475,12 +3476,12 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- /* Program table for at least 2 queues w/ SR-IOV so that VFs can
+ /* Program table for at least 4 queues w/ SR-IOV so that VFs can
* make full use of any rings they may have. We will use the
* PSRTYPE register to control how many rings we use within the PF.
*/
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
- rss_i = 2;
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
+ rss_i = 4;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -3544,7 +3545,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
else if (tcs > 1)
mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
@@ -3922,6 +3924,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
rfctl &= ~IXGBE_RFCTL_RSC_DIS;
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
rfctl |= IXGBE_RFCTL_RSC_DIS;
+
+ /* disable NFS filtering */
+ rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* Program registers for the distribution of queues */
@@ -4102,23 +4107,20 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
- /* legacy case, we can just disable VLAN filtering */
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ } else {
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
+ /* Nothing to do for 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
/* We are already in VLAN promisc, nothing to do */
if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
return;
@@ -4126,10 +4128,6 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
- /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4201,19 +4199,9 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
+ hw->mac.type == ixgbe_mac_82598EB)
return;
- }
/* We are not in VLAN promisc, nothing to do */
if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
@@ -4586,18 +4574,23 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
}
}
-static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
+static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
{
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vxlanctrl;
+
+ if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
+ IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
+ return;
+
+ vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
+
+ if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
adapter->vxlan_port = 0;
- break;
- default:
- break;
- }
+
+ if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
+ adapter->geneve_port = 0;
}
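A minimal sketch of the read-modify-write clear this new helper performs, assuming a plain variable standing in for the 32-bit VXLANCTRL register (the field layout matches the masks added to ixgbe_type.h later in this patch):

#include <stdint.h>

#define VXLAN_UDPPORT_MASK	0x0000ffffu	/* bits 15:0,  VXLAN port  */
#define GENEVE_UDPPORT_MASK	0xffff0000u	/* bits 31:16, GENEVE port */

static uint32_t vxlanctrl;	/* stand-in for IXGBE_VXLANCTRL */

static void clear_udp_tunnel_port(uint32_t mask)
{
	/* bitwise AND with the complement clears only the selected
	 * 16-bit field and leaves the other tunnel port intact
	 */
	vxlanctrl &= ~mask;
}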
#ifdef CONFIG_IXGBE_DCB
@@ -5500,8 +5493,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
- adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
- IXGBE_FLAG2_RESET_REQUESTED);
+ clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
+ adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
del_timer_sync(&adapter->service_timer);
@@ -5711,8 +5704,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
- case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ /* fall through */
+ case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
#endif
@@ -6144,7 +6139,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
- ixgbe_clear_vxlan_port(adapter);
+ ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
udp_tunnel_get_rx_info(netdev);
return 0;
@@ -6921,7 +6916,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
* (Do the reset outside of interrupt context).
*/
e_warn(drv, "initiating reset to clear Tx work after link loss\n");
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
}
}
}
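The flags2-to-state conversion running through these hunks uses the kernel's atomic bitops; a short sketch of the pattern, assuming a state bitmap like adapter->state and a hypothetical bit index:

#include <linux/bitops.h>

static unsigned long state;	/* stand-in for adapter->state */
#define RESET_REQUESTED_BIT	0	/* hypothetical bit index */

static void request_reset(void)
{
	/* safe from any context, no read-modify-write race on flags2 */
	set_bit(RESET_REQUESTED_BIT, &state);
}

static bool consume_reset_request(void)
{
	/* the service task consumes the request exactly once */
	return test_and_clear_bit(RESET_REQUESTED_BIT, &state);
}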
@@ -7187,11 +7182,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
- if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
return;
- adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
-
/* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_REMOVING, &adapter->state) ||
@@ -7225,9 +7218,9 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
- if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
rtnl_lock();
- adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
udp_tunnel_get_rx_info(adapter->netdev);
rtnl_unlock();
}
@@ -7667,6 +7660,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
if (adapter->vxlan_port &&
udp_hdr(skb)->dest == adapter->vxlan_port)
hdr.network = skb_inner_network_header(skb);
+
+ if (adapter->geneve_port &&
+ udp_hdr(skb)->dest == adapter->geneve_port)
+ hdr.network = skb_inner_network_header(skb);
}
/* Currently only IPv4/IPv6 with TCP is supported */
@@ -8802,10 +8799,23 @@ static int ixgbe_set_features(struct net_device *netdev,
netdev->features = features;
if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
- if (features & NETIF_F_RXCSUM)
- adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
- else
- ixgbe_clear_vxlan_port(adapter);
+ if (features & NETIF_F_RXCSUM) {
+ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
+ } else {
+ u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
+
+ ixgbe_clear_udp_tunnel_port(adapter, port_mask);
+ }
+ }
+
+ if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
+ if (features & NETIF_F_RXCSUM) {
+ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
+ } else {
+ u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
+
+ ixgbe_clear_udp_tunnel_port(adapter, port_mask);
+ }
}
if (need_reset)
@@ -8818,67 +8828,115 @@ static int ixgbe_set_features(struct net_device *netdev,
}
/**
- * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
* @dev: The port's netdev
* @ti: Tunnel endpoint information
**/
-static void ixgbe_add_vxlan_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
__be16 port = ti->port;
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
+ u32 port_shift = 0;
+ u32 reg;
if (ti->sa_family != AF_INET)
return;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
- if (adapter->vxlan_port == port)
- return;
+ if (adapter->vxlan_port == port)
+ return;
+
+ if (adapter->vxlan_port) {
+ netdev_info(dev,
+ "VXLAN port %d set, not adding port %d\n",
+ ntohs(adapter->vxlan_port),
+ ntohs(port));
+ return;
+ }
- if (adapter->vxlan_port) {
- netdev_info(dev,
- "Hit Max num of VXLAN ports, not adding port %d\n",
- ntohs(port));
+ adapter->vxlan_port = port;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+
+ if (adapter->geneve_port == port)
+ return;
+
+ if (adapter->geneve_port) {
+ netdev_info(dev,
+ "GENEVE port %d set, not adding port %d\n",
+ ntohs(adapter->geneve_port),
+ ntohs(port));
+ return;
+ }
+
+ port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
+ adapter->geneve_port = port;
+ break;
+ default:
return;
}
- adapter->vxlan_port = port;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
+ reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
}
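Both offloaded ports share the single 32-bit VXLANCTRL register, VXLAN in bits 15:0 and GENEVE in bits 31:16, which is what the port_shift OR-in above expresses; a standalone sketch of the packing:

#include <stdint.h>

/* hypothetical helper: host-order ports packed as the hardware expects */
static uint32_t pack_tunnel_ports(uint16_t vxlan_port, uint16_t geneve_port)
{
	return (uint32_t)vxlan_port | ((uint32_t)geneve_port << 16);
}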
/**
- * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
* @dev: The port's netdev
* @ti: Tunnel endpoint information
**/
-static void ixgbe_del_vxlan_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u32 port_mask;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
+ ti->type != UDP_TUNNEL_TYPE_GENEVE)
return;
if (ti->sa_family != AF_INET)
return;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
- if (adapter->vxlan_port != ti->port) {
- netdev_info(dev, "Port %d was not found, not deleting\n",
- ntohs(ti->port));
+ if (adapter->vxlan_port != ti->port) {
+ netdev_info(dev, "VXLAN port %d not found\n",
+ ntohs(ti->port));
+ return;
+ }
+
+ port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+
+ if (adapter->geneve_port != ti->port) {
+ netdev_info(dev, "GENEVE port %d not found\n",
+ ntohs(ti->port));
+ return;
+ }
+
+ port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
+ break;
+ default:
return;
}
- ixgbe_clear_vxlan_port(adapter);
- adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ ixgbe_clear_udp_tunnel_port(adapter, port_mask);
+ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
}
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -9192,8 +9250,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = ixgbe_add_vxlan_port,
- .ndo_udp_tunnel_del = ixgbe_del_vxlan_port,
+ .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
+ .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index db0731e05401..021ab9b89c71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -346,8 +346,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
}
}
- /* clear value if nothing found */
- hw->phy.mdio.prtad = 0;
+ /* indicate no PHY found */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
return IXGBE_ERR_PHY_ADDR_INVALID;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index e5431bfe3339..a92277683a64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1254,7 +1254,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
return err;
- } else
+ } else if (adapter->ptp_clock)
e_dev_info("registered PHC device on %s\n", netdev->name);
/* set default timestamp mode to disabled here. We do this in
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8618599dfd6f..7e5d9850e4b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -329,13 +329,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
for (i = 0; i < adapter->num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ ixgbe_sriov_reinit(adapter);
+
err = pci_enable_sriov(dev, num_vfs);
if (err) {
e_dev_warn("Failed to enable PCI sriov: %d\n", err);
return err;
}
ixgbe_get_vfs(adapter);
- ixgbe_sriov_reinit(adapter);
return num_vfs;
#else
@@ -1354,13 +1356,16 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
return err;
}
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 vlan_proto)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
if (vlan || qos) {
/* Check if there is already a port VLAN set, if so
* we have to delete the old one first before we
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 47e65e2f886a..0c7977d27b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -43,7 +43,7 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 1248a9936f7a..31d82e3abac8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -90,6 +90,7 @@
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
/* VF Device IDs */
@@ -487,6 +488,13 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
* Filter Table */
+/* masks for accessing VXLAN and GENEVE UDP ports */
+#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
+#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
+
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
@@ -1823,6 +1831,9 @@ enum {
#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8)
+#define IXGBE_X557_MAX_LED_INDEX 3
+#define IXGBE_X557_LED_PROVISIONING 0xC430
/* LED modes */
#define IXGBE_LED_LINK_UP 0x0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 4716ca499e67..7e6b9267ca9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -295,6 +295,12 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ /* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
@@ -1453,7 +1459,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* Configure internal PHY for KR/KX. */
ixgbe_setup_kr_speed_x550em(hw, speed);
- if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
return IXGBE_ERR_PHY_ADDR_INVALID;
/* Get external PHY device id */
@@ -2114,6 +2120,50 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
return ixgbe_enable_lasi_ext_t_x550em(hw);
}
+/**
+ * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn on
+ **/
+static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
+ hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ return 0;
+}
+
+/**
+ * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn off
+ **/
+static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn off the LED, clear the manual-set mode bit. */
+ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
+ hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ return 0;
+}
+
/** ixgbe_get_lcd_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
@@ -2344,18 +2394,12 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
* PHY address. This register field has only been used for X552.
*/
- if (!hw->phy.nw_mng_if_sel) {
- if (hw->mac.type == ixgbe_mac_x550em_a) {
- struct ixgbe_adapter *adapter = hw->back;
-
- e_warn(drv, "nw_mng_if_sel not set\n");
- }
- return;
+ if (hw->mac.type == ixgbe_mac_x550em_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+ hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
}
-
- hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
}
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
@@ -2456,6 +2500,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
media_type = ixgbe_media_type_copper;
break;
default:
@@ -2514,6 +2559,9 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_SFP:
/* Config MDIO clock speed before the first MDIO PHY access */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -2853,8 +2901,6 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.write_analog_reg8 = NULL, \
.set_rxpba = &ixgbe_set_rxpba_generic, \
.check_link = &ixgbe_check_mac_link_generic, \
- .led_on = &ixgbe_led_on_generic, \
- .led_off = &ixgbe_led_off_generic, \
.blink_led_start = &ixgbe_blink_led_start_X540, \
.blink_led_stop = &ixgbe_blink_led_stop_X540, \
.set_rar = &ixgbe_set_rar_generic, \
@@ -2886,6 +2932,8 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
@@ -2904,6 +2952,8 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
+ .led_on = ixgbe_led_on_t_x550em,
+ .led_off = ixgbe_led_off_t_x550em,
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
@@ -2922,6 +2972,8 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
static struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
+ .led_on = ixgbe_led_on_t_x550em,
+ .led_off = ixgbe_led_off_t_x550em,
.reset_hw = ixgbe_reset_hw_X550em,
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
@@ -2997,6 +3049,8 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_x550a,
.write_reg = &ixgbe_write_phy_reg_x550a,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index be52f597688b..5639fbe294d0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -502,12 +502,9 @@ extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
-#ifdef DEBUG
-char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
-#define hw_dbg(hw, format, arg...) \
- printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(hw, format, arg...) do {} while (0)
-#endif
+#define ixgbevf_hw_to_netdev(hw) \
+ (((struct ixgbevf_adapter *)(hw)->back)->netdev)
+#define hw_dbg(hw, format, arg...) \
+ netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
#endif /* _IXGBEVF_H_ */
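With this rewrite hw_dbg() is always compiled and routed through netdev_dbg(), so output is gated at runtime by dynamic debug and prefixed with the interface name instead of requiring a DEBUG build. A call site from ixgbevf_main.c below:

	hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);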
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d9d6616f02a4..7eaac3234049 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1612,7 +1612,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
- pr_err("Could not enable Tx Queue %d\n", reg_idx);
+ hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
/**
@@ -1810,8 +1810,10 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
dev_err(&adapter->pdev->dev,
"Failed to set MTU at %d\n", netdev->mtu);
@@ -2993,6 +2995,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
**/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -3757,8 +3760,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index a52f70ec42b6..d46ba1dabcb7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -284,7 +284,8 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (addr)
ether_addr_copy(msg_addr, addr);
- ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -441,7 +442,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
ether_addr_copy(msg_addr, addr);
- ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -551,7 +553,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
msgbuf[1] = xcast_mode;
- err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (err)
return err;
@@ -588,7 +591,8 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Setting the 8-bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (err)
goto mbx_err;
@@ -791,7 +795,8 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
- ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -837,7 +842,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[1] = api;
msg[2] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg,
+ sizeof(msg) / sizeof(u32));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -887,7 +893,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg,
+ sizeof(msg) / sizeof(u32));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 8982c882af1b..a0d1b084ecec 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -211,8 +211,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!dev->regs) {
dev_err(&pdev->dev, "Unable to remap SMI register\n");
- ret = -ENODEV;
- goto out_mdio;
+ return -ENODEV;
}
init_waitqueue_head(&dev->smi_busy_wait);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b74548728fb5..5cb07c2017bf 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -400,7 +400,6 @@ struct mvneta_port {
u16 rx_ring_size;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
phy_interface_t phy_interface;
struct device_node *phy_node;
unsigned int link;
@@ -637,8 +636,9 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
}
/* Get System Network Statistics */
-struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static struct rtnl_link_stats64 *
+mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mvneta_port *pp = netdev_priv(dev);
unsigned int start;
@@ -2653,6 +2653,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx;
int rx_queue;
struct mvneta_port *pp = netdev_priv(napi->dev);
+ struct net_device *ndev = pp->dev;
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
@@ -2670,7 +2671,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
(MVNETA_CAUSE_PHY_STATUS_CHANGE |
MVNETA_CAUSE_LINK_CHANGE |
MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
- mvneta_fixed_link_update(pp, pp->phy_dev);
+ mvneta_fixed_link_update(pp, ndev->phydev);
}
}
@@ -2965,6 +2966,7 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
+ struct net_device *ndev = pp->dev;
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2987,15 +2989,16 @@ static void mvneta_start_dev(struct mvneta_port *pp)
MVNETA_CAUSE_LINK_CHANGE |
MVNETA_CAUSE_PSC_SYNC_CHANGE);
- phy_start(pp->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
+ struct net_device *ndev = pp->dev;
- phy_stop(pp->phy_dev);
+ phy_stop(ndev->phydev);
for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
@@ -3168,7 +3171,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
static void mvneta_adjust_link(struct net_device *ndev)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = pp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
int status_change = 0;
if (phydev->link) {
@@ -3246,7 +3249,6 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
- pp->phy_dev = phy_dev;
pp->link = 0;
pp->duplex = 0;
pp->speed = 0;
@@ -3256,8 +3258,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
- phy_disconnect(pp->phy_dev);
- pp->phy_dev = NULL;
+ struct net_device *ndev = pp->dev;
+
+ phy_disconnect(ndev->phydev);
}
/* Electing a CPU must be done in an atomic way: it should be done
@@ -3515,42 +3518,31 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mvneta_port *pp = netdev_priv(dev);
-
- if (!pp->phy_dev)
+ if (!dev->phydev)
return -ENOTSUPP;
- return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
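A sketch of the conversion pattern used throughout the mvneta hunks: phylib already stores the attached PHY in net_device::phydev once phy_connect()/of_phy_connect() succeeds, so a driver-private copy such as pp->phy_dev is redundant:

#include <linux/netdevice.h>
#include <linux/phy.h>

static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)	/* no PHY attached */
		return -ENOTSUPP;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}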
/* Ethtool methods */
-/* Get settings (phy address, speed) for ethtools */
-int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+/* Set link ksettings (phy address, speed) for ethtools */
+static int
+mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mvneta_port *pp = netdev_priv(dev);
-
- if (!pp->phy_dev)
- return -ENODEV;
-
- return phy_ethtool_gset(pp->phy_dev, cmd);
-}
-
-/* Set settings (phy address, speed) for ethtools */
-int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mvneta_port *pp = netdev_priv(dev);
- struct phy_device *phydev = pp->phy_dev;
+ struct mvneta_port *pp = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
if (!phydev)
return -ENODEV;
- if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
u32 val;
- mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);
+ mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
- if (cmd->autoneg == AUTONEG_DISABLE) {
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
@@ -3567,17 +3559,17 @@ int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
- pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
+ pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
netdev_info(pp->dev, "autoneg status set to %i\n",
pp->use_inband_status);
- if (netif_running(dev)) {
+ if (netif_running(ndev)) {
mvneta_port_down(pp);
mvneta_port_up(pp);
}
}
- return phy_ethtool_sset(pp->phy_dev, cmd);
+ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
/* Set interrupt coalescing for ethtools */
@@ -3841,8 +3833,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
const struct ethtool_ops mvneta_eth_tool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = mvneta_ethtool_get_settings,
- .set_settings = mvneta_ethtool_set_settings,
.set_coalesce = mvneta_ethtool_set_coalesce,
.get_coalesce = mvneta_ethtool_get_coalesce,
.get_drvinfo = mvneta_ethtool_get_drvinfo,
@@ -3855,6 +3845,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
.get_rxnfc = mvneta_ethtool_get_rxnfc,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
};
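A minimal sketch of the ksettings migration for a hypothetical phylib-based driver: legacy ethtool_cmd fields move under cmd->base, a PHY-only getter can point straight at the phylib helper, and the setter finishes through phy_ethtool_ksettings_set():

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int example_set_link_ksettings(struct net_device *ndev,
				      const struct ethtool_link_ksettings *cmd)
{
	if (!ndev->phydev)
		return -ENODEV;

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		/* driver-specific forced-speed MAC setup would go here */
	}

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = example_set_link_ksettings,
};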
/* Initialize hw */
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index e74fd44a92f7..a32de432800c 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -133,7 +133,7 @@ struct mvneta_bm_pool {
void *mvneta_frag_alloc(unsigned int frag_size);
void mvneta_frag_free(unsigned int frag_size, void *data);
-#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE)
+#if IS_ENABLED(CONFIG_MVNETA_BM)
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
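IS_ENABLED() evaluates true for both built-in (=y) and modular (=m) configurations, which is exactly what the removed pair of defined() tests spelled out:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_MVNETA_BM)	/* CONFIG_MVNETA_BM=y or =m */
/* buffer-manager helpers are available */
#endif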
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 467138b423d3..f05ea56dcff2 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3070,7 +3070,7 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
goto done;
}
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
sky2_read32(hw, B0_Y2_SP_LISR);
done:
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3743af8f1ded..ad4ab979507b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
@@ -51,7 +52,7 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "esw", "gp1", "gp2"
+ "ethif", "esw", "gp1", "gp2", "trgpll"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -134,6 +135,33 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+{
+ u32 val;
+ int ret;
+
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_CLK_SEL362_5,
+ ETHSYS_TRGMII_CLK_SEL362_5);
+
+ val = (speed == SPEED_1000) ? 250000000 : 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+ val = (speed == SPEED_1000) ?
+ RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+ val = (speed == SPEED_1000) ?
+ TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -144,7 +172,10 @@ static void mtk_phy_link_adjust(struct net_device *dev)
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
MAC_MCR_BACKPR_EN;
- switch (mac->phy_dev->speed) {
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
+ switch (dev->phydev->speed) {
case SPEED_1000:
mcr |= MAC_MCR_SPEED_1000;
break;
@@ -153,20 +184,23 @@ static void mtk_phy_link_adjust(struct net_device *dev)
break;
}
- if (mac->phy_dev->link)
+ if (mac->id == 0 && !mac->trgmii)
+ mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
+
+ if (dev->phydev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex) {
+ if (dev->phydev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
+ if (dev->phydev->pause)
rmt_adv = LPA_PAUSE_CAP;
- if (mac->phy_dev->asym_pause)
+ if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Pause)
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
@@ -183,7 +217,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
- if (mac->phy_dev->link)
+ if (dev->phydev->link)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
@@ -192,17 +226,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
struct device_node *phy_node)
{
- const __be32 *_addr = NULL;
struct phy_device *phydev;
- int phy_mode, addr;
-
- _addr = of_get_property(phy_node, "reg", NULL);
+ int phy_mode;
- if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
- pr_err("%s: invalid phy address\n", phy_node->name);
- return -EINVAL;
- }
- addr = be32_to_cpu(*_addr);
phy_mode = of_get_phy_mode(phy_node);
if (phy_mode < 0) {
dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
@@ -221,17 +247,17 @@ static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
mac->id, phydev_name(phydev), phydev->phy_id,
phydev->drv->name);
- mac->phy_dev = phydev;
-
return 0;
}
-static int mtk_phy_connect(struct mtk_mac *mac)
+static int mtk_phy_connect(struct net_device *dev)
{
- struct mtk_eth *eth = mac->hw;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth;
struct device_node *np;
- u32 val, ge_mode;
+ u32 val;
+ eth = mac->hw;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
if (!np && of_phy_is_fixed_link(mac->of_node))
if (!of_phy_register_fixed_link(mac->of_node))
@@ -240,22 +266,24 @@ static int mtk_phy_connect(struct mtk_mac *mac)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ mac->trgmii = true;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
- ge_mode = 0;
+ mac->ge_mode = 0;
break;
case PHY_INTERFACE_MODE_MII:
- ge_mode = 1;
+ mac->ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
- ge_mode = 2;
+ mac->ge_mode = 2;
break;
case PHY_INTERFACE_MODE_RMII:
if (!mac->id)
goto err_phy;
- ge_mode = 3;
+ mac->ge_mode = 3;
break;
default:
goto err_phy;
@@ -264,23 +292,26 @@ static int mtk_phy_connect(struct mtk_mac *mac)
/* put the gmac into the right mode */
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
- val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- mtk_phy_connect_node(eth, mac, np);
- mac->phy_dev->autoneg = AUTONEG_ENABLE;
- mac->phy_dev->speed = 0;
- mac->phy_dev->duplex = 0;
+ /* couple phydev to net_device */
+ if (mtk_phy_connect_node(eth, mac, np))
+ goto err_phy;
+
+ dev->phydev->autoneg = AUTONEG_ENABLE;
+ dev->phydev->speed = 0;
+ dev->phydev->duplex = 0;
if (of_phy_is_fixed_link(mac->of_node))
- mac->phy_dev->supported |=
+ dev->phydev->supported |=
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
- mac->phy_dev->advertising = mac->phy_dev->supported |
+ dev->phydev->advertising = dev->phydev->supported |
ADVERTISED_Autoneg;
- phy_start_aneg(mac->phy_dev);
+ phy_start_aneg(dev->phydev);
of_node_put(np);
@@ -288,7 +319,7 @@ static int mtk_phy_connect(struct mtk_mac *mac)
err_phy:
of_node_put(np);
- dev_err(eth->dev, "invalid phy_mode\n");
+ dev_err(eth->dev, "%s: invalid phy\n", __func__);
return -EINVAL;
}
@@ -336,25 +367,27 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
}
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
+static inline void mtk_irq_disable(struct mtk_eth *eth,
+ unsigned reg, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, reg);
+ mtk_w32(eth, val & ~mask, reg);
spin_unlock_irqrestore(&eth->irq_lock, flags);
}
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
+static inline void mtk_irq_enable(struct mtk_eth *eth,
+ unsigned reg, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, reg);
+ mtk_w32(eth, val | mask, reg);
spin_unlock_irqrestore(&eth->irq_lock, flags);
}
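With the mask register now a parameter, TX-done interrupts are masked and unmasked through the QDMA block and RX-done interrupts through the PDMA block; the converted call sites later in this patch follow this shape:

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);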
@@ -363,18 +396,20 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
const char *macaddr = dev->dev_addr;
- unsigned long flags;
if (ret)
return ret;
- spin_lock_irqsave(&mac->hw->page_lock, flags);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ spin_lock_bh(&mac->hw->page_lock);
mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
MTK_GDMA_MAC_ADRH(mac->id));
mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
(macaddr[4] << 8) | macaddr[5],
MTK_GDMA_MAC_ADRL(mac->id));
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
+ spin_unlock_bh(&mac->hw->page_lock);
return 0;
}
@@ -759,7 +794,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mtk_eth *eth = mac->hw;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
- unsigned long flags;
bool gso = false;
int tx_num;
@@ -767,14 +801,17 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
* however we have 2 queues running on the same ring so we need to lock
* the ring access
*/
- spin_lock_irqsave(&eth->page_lock, flags);
+ spin_lock(&eth->page_lock);
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto drop;
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
return NETDEV_TX_BUSY;
}
@@ -799,22 +836,62 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
return NETDEV_TX_OK;
drop:
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+ int i;
+ struct mtk_rx_ring *ring;
+ int idx;
+
+ if (!eth->hwlro)
+ return &eth->rx_ring[0];
+
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+ }
+ }
+
+ return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring;
+ int i;
+
+ if (!eth->hwlro) {
+ ring = &eth->rx_ring[0];
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ } else {
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ if (ring->calc_idx_update) {
+ ring->calc_idx_update = false;
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ }
+ }
+ }
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
- int idx = ring->calc_idx;
+ struct mtk_rx_ring *ring;
+ int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
@@ -826,7 +903,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
dma_addr_t dma_addr;
int mac = 0;
- idx = NEXT_RX_DESP_IDX(idx);
+ ring = mtk_get_rx_ring(eth);
+ if (unlikely(!ring))
+ goto rx_done;
+
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -841,6 +922,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev = eth->netdev[mac];
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto release_desc;
+
/* alloc new buffer */
new_data = napi_alloc_frag(ring->frag_size);
if (unlikely(!new_data)) {
@@ -890,17 +974,19 @@ release_desc:
rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring->calc_idx = idx;
+
+ done++;
+ }
+
+rx_done:
+ if (done) {
/* make sure that all changes to the dma ring are flushed before
* we continue
*/
wmb();
- mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
- done++;
+ mtk_update_rx_cpu_idx(eth);
}
- if (done < budget)
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
-
return done;
}
@@ -1009,7 +1095,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1019,30 +1105,33 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
u32 status, mask;
int rx_done = 0;
+ int remain_budget = budget;
mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
- rx_done = mtk_poll_rx(napi, budget, eth);
+
+poll_again:
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, remain_budget, eth);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+ mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n",
rx_done, status, mask);
}
-
- if (rx_done == budget)
- return budget;
-
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (status & MTK_RX_DONE_INT)
+ if (rx_done == remain_budget)
return budget;
+ status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+ if (status & MTK_RX_DONE_INT) {
+ remain_budget -= rx_done;
+ goto poll_again;
+ }
napi_complete(napi);
- mtk_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
- return rx_done;
+ return rx_done + budget - remain_budget;
}
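A worked example of the re-poll budget accounting above, assuming budget = 64:

/* pass 1: mtk_poll_rx(napi, 64) handles 40 packets, RX still pending
 *         -> remain_budget = 64 - 40 = 24, poll again
 * pass 2: mtk_poll_rx(napi, 24) handles 10 packets, ring drained
 *         -> napi_complete(), return 10 + 64 - 24 = 50 (total handled)
 * Whenever a pass consumes all of remain_budget, the full budget is
 * returned instead so NAPI keeps the poll scheduled.
 */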
static int mtk_tx_alloc(struct mtk_eth *eth)
@@ -1089,6 +1178,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
MTK_QTX_DRX_PTR);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
return 0;
@@ -1117,32 +1207,41 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
}
-static int mtk_rx_alloc(struct mtk_eth *eth)
+static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+ int rx_data_len, rx_dma_size;
int i;
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
+ if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+ rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+ rx_dma_size = MTK_DMA_SIZE;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
ring->buf_size = mtk_max_buf_size(ring->frag_size);
- ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
+ ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
GFP_KERNEL);
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (!ring->data[i])
return -ENOMEM;
}
ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ rx_dma_size * sizeof(*ring->dma),
&ring->phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->dma)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
ring->data[i] + NET_SKB_PAD,
ring->buf_size,
@@ -1153,28 +1252,30 @@ static int mtk_rx_alloc(struct mtk_eth *eth)
ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
- ring->calc_idx = MTK_DMA_SIZE - 1;
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+ ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+ mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+ mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
return 0;
}
-static void mtk_rx_clean(struct mtk_eth *eth)
+static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
int i;
if (ring->data && ring->dma) {
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring->dma_size; i++) {
if (!ring->data[i])
continue;
if (!ring->dma[i].rxd1)
@@ -1191,13 +1292,275 @@ static void mtk_rx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
ring->dma = NULL;
}
}
+static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+{
+ int i;
+ u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
+ u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
+
+ /* set LRO rings to auto-learn modes */
+ ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
+
+ /* validate LRO ring */
+ ring_ctrl_dw2 |= MTK_RING_VLD;
+
+ /* set AGE timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
+ ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
+
+ /* set max AGG timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
+
+ /* set max LRO AGG count */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
+ ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
+
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
+ }
+
+ /* IPv4 checksum update enable */
+ lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
+
+ /* switch priority comparison to packet count mode */
+ lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
+
+ /* bandwidth threshold setting */
+ mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
+
+ /* auto-learn score delta setting */
+ mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
+
+ /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
+ mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
+ MTK_PDMA_LRO_ALT_REFRESH_TIMER);
+
+ /* set HW LRO mode & the max aggregation count for rx packets */
+ lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
+
+ /* the minimal remaining room of SDL0 in RXD for lro aggregation */
+ lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
+
+ /* enable HW LRO */
+ lro_ctrl_dw0 |= MTK_LRO_EN;
+
+ mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
+ mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
+
+ return 0;
+}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ /* relinquish lro rings, flush aggregated packets */
+ mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+ /* wait for the relinquishments to complete */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+ if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+
+ /* invalidate lro rings */
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+ /* disable HW LRO */
+ mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+ /* validate the IP setting */
+ mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if ((fsp->flow_type != TCP_V4_FLOW) ||
+ (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+ (fsp->location > 1))
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+ return 0;
+}
+
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if (fsp->location > 1)
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+ return 0;
+}
+
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, hwlro_idx;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ mac->hwlro_ip[i] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+ }
+
+ mac->hwlro_ip_cnt = 0;
+}
+
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ /* only the TCP destination IPv4 address is meaningful; all other fields are ignored */
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+ fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+ fsp->h_u.tcp_ip4_spec.ip4src = 0;
+ fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+ fsp->h_u.tcp_ip4_spec.psrc = 0;
+ fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = 0;
+ fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+ fsp->h_u.tcp_ip4_spec.tos = 0;
+ fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+ return 0;
+}
+
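+/* Report the slot indices of all programmed LRO IPs in @rule_locs. */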
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i]) {
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
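+/* Keep NETIF_F_LRO asserted while LRO flow rules are still programmed. */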
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_LRO)) {
+ struct mtk_mac *mac = netdev_priv(dev);
+ int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ if (ip_cnt) {
+ netdev_info(dev, "RX flow rules are still programmed, keeping LRO enabled\n");
+
+ features |= NETIF_F_LRO;
+ }
+ }
+
+ return features;
+}
+
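+/* Tear down the programmed LRO rules when NETIF_F_LRO is turned off. */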
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+ int err = 0;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if (!(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ return err;
+}
+
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
@@ -1218,6 +1581,7 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
static int mtk_dma_init(struct mtk_eth *eth)
{
int err;
+ u32 i;
if (mtk_dma_busy_wait(eth))
return -EBUSY;
@@ -1233,10 +1597,21 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (err)
return err;
- err = mtk_rx_alloc(eth);
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
return err;
+ if (eth->hwlro) {
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+ if (err)
+ return err;
+ }
+ err = mtk_hwlro_rx_init(eth);
+ if (err)
+ return err;
+ }
+
/* Enable random early drop and set drop threshold automatically */
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
MTK_QDMA_FC_THRES);
@@ -1261,7 +1636,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
- mtk_rx_clean(eth);
+ mtk_rx_clean(eth, 0);
+
+ if (eth->hwlro) {
+ mtk_hwlro_rx_uninit(eth);
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_rx_clean(eth, i);
+ }
+
kfree(eth->scratch_head);
}
@@ -1282,7 +1664,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1294,7 +1676,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
- mtk_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1305,11 +1687,12 @@ static void mtk_poll_controller(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
- mtk_irq_disable(eth, int_mask);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_irq_enable(eth, int_mask);
+ mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif
@@ -1324,11 +1707,15 @@ static int mtk_start_dma(struct mtk_eth *eth)
}
mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
- MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
+ MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+ MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
+ mtk_w32(eth,
+ MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+ MTK_PDMA_GLO_CFG);
+
return 0;
}
@@ -1346,11 +1733,12 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
atomic_inc(&eth->dma_refcnt);
- phy_start(mac->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -1358,16 +1746,15 @@ static int mtk_open(struct net_device *dev)
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
- unsigned long flags;
u32 val;
int i;
/* stop the dma engine */
- spin_lock_irqsave(&eth->page_lock, flags);
+ spin_lock_bh(&eth->page_lock);
val = mtk_r32(eth, glo_cfg);
mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
glo_cfg);
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock_bh(&eth->page_lock);
/* wait for dma stop */
for (i = 0; i < 10; i++) {
@@ -1386,32 +1773,63 @@ static int mtk_stop(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
netif_tx_disable(dev);
- phy_stop(mac->phy_dev);
+ phy_stop(dev->phydev);
/* only shutdown DMA if this is the last user */
if (!atomic_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
return 0;
}
-static int __init mtk_hw_init(struct mtk_eth *eth)
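+/* Pulse the given ETHSYS reset bits: assert, wait ~1ms, deassert and
+ * give the blocks 10ms to settle.
+ */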
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
- int err, i;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ reset_bits);
+
+ usleep_range(1000, 1100);
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ ~reset_bits);
+ mdelay(10);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
+
+ clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
+ ethsys_reset(eth, RSTCTRL_FE);
+ ethsys_reset(eth, RSTCTRL_PPE);
- /* reset the frame engine */
- reset_control_assert(eth->rstc);
- usleep_range(10, 20);
- reset_control_deassert(eth->rstc);
- usleep_range(10, 20);
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i])
+ continue;
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
+ val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
+ }
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
/* Set GE2 driving and slew rate */
regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
@@ -1431,22 +1849,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
- err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
-
- err = mtk_mdio_init(eth);
- if (err)
- return err;
-
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, ~0);
+ mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1460,9 +1867,8 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
for (i = 0; i < 2; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
- /* setup the forward port to send frame to QDMA */
+ /* set up the forward port to send frames to the PDMA */
val &= ~0xffff;
- val |= 0x5555;
/* Enable RX checksum */
val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
@@ -1474,6 +1880,22 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
return 0;
}
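+/* Undo mtk_hw_init(): gate the ethernet clocks and drop the runtime PM
+ * reference taken at init time.
+ */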
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+ if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+
+ return 0;
+}
+
static int __init mtk_init(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -1492,7 +1914,7 @@ static int __init mtk_init(struct net_device *dev)
dev->addr_assign_type = NET_ADDR_RANDOM;
}
- return mtk_phy_connect(mac);
+ return mtk_phy_connect(dev);
}
static void mtk_uninit(struct net_device *dev)
@@ -1500,19 +1922,18 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- phy_disconnect(mac->phy_dev);
- mtk_irq_disable(eth, ~0);
+ phy_disconnect(dev->phydev);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
-
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
default:
break;
}
@@ -1528,6 +1949,12 @@ static void mtk_pending_work(struct work_struct *work)
rtnl_lock();
+ dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+ while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+ cpu_relax();
+
+ dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
@@ -1535,6 +1962,27 @@ static void mtk_pending_work(struct work_struct *work)
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
+ dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+ /* restart the underlying hardware: power, clocks, pin mux
+ * and the connected PHY
+ */
+ mtk_hw_deinit(eth);
+
+ if (eth->dev->pins)
+ pinctrl_select_state(eth->dev->pins->p,
+ eth->dev->pins->default_state);
+ mtk_hw_init(eth);
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i] ||
+ of_phy_is_fixed_link(eth->mac[i]->of_node))
+ continue;
+ err = phy_init_hw(eth->netdev[i]->phydev);
+ if (err)
+ dev_err(eth->dev, "%s: PHY init failed.\n",
+ eth->netdev[i]->name);
+ }
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -1547,51 +1995,69 @@ static void mtk_pending_work(struct work_struct *work)
dev_close(eth->netdev[i]);
}
}
+
+ dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+ clear_bit_unlock(MTK_RESETTING, &eth->state);
+
rtnl_unlock();
}
-static int mtk_cleanup(struct mtk_eth *eth)
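+/* Free the netdevs allocated by mtk_add_mac(); split out of
+ * mtk_cleanup() so probe error paths can free devices that were
+ * never registered.
+ */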
+static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
+ free_netdev(eth->netdev[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+ int i;
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
unregister_netdev(eth->netdev[i]);
- free_netdev(eth->netdev[i]);
}
+
+ return 0;
+}
+
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+ mtk_unreg_dev(eth);
+ mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
return 0;
}
-static int mtk_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int mtk_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
+ struct mtk_mac *mac = netdev_priv(ndev);
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_gset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}
-static int mtk_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int mtk_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_mac *mac = netdev_priv(ndev);
- if (cmd->phy_address != mac->phy_dev->mdio.addr) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- }
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_sset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
@@ -1622,7 +2088,10 @@ static int mtk_nway_reset(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
- return genphy_restart_aneg(mac->phy_dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ return genphy_restart_aneg(dev->phydev);
}
static u32 mtk_get_link(struct net_device *dev)
@@ -1630,11 +2099,14 @@ static u32 mtk_get_link(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
int err;
- err = genphy_update_link(mac->phy_dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ err = genphy_update_link(dev->phydev);
if (err)
return ethtool_op_get_link(dev);
- return mac->phy_dev->link;
+ return dev->phydev->link;
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1670,6 +2142,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
unsigned int start;
int i;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
if (netif_running(dev) && netif_device_present(dev)) {
if (spin_trylock(&hwstats->stats_lock)) {
mtk_stats_update_mac(mac);
@@ -1677,8 +2152,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
}
}
+ data_src = (u64 *)hwstats;
+
do {
- data_src = (u64*)hwstats;
data_dst = data;
start = u64_stats_fetch_begin_irq(&hwstats->syncp);
@@ -1687,9 +2163,65 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
-static struct ethtool_ops mtk_ethtool_ops = {
- .get_settings = mtk_get_settings,
- .set_settings = mtk_set_settings,
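+/* ethtool ->get_rxnfc: expose the HW LRO IP table as RX flow rules,
+ * only while NETIF_F_LRO is enabled.
+ */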
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (dev->features & NETIF_F_LRO) {
+ cmd->data = MTK_MAX_RX_RING_NUM;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (dev->features & NETIF_F_LRO) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ cmd->rule_cnt = mac->hwlro_ip_cnt;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_all(dev, cmd,
+ rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
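+/* ethtool ->set_rxnfc: insert or delete an LRO IP rule, e.g.
+ * "ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.1 loc 0".
+ */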
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_add_ipaddr(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_del_ipaddr(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops mtk_ethtool_ops = {
+ .get_link_ksettings = mtk_get_link_ksettings,
+ .set_link_ksettings = mtk_set_link_ksettings,
.get_drvinfo = mtk_get_drvinfo,
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
@@ -1698,6 +2230,8 @@ static struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_rxnfc = mtk_get_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -1712,6 +2246,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
+ .ndo_fix_features = mtk_fix_features,
+ .ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
@@ -1750,6 +2286,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw = eth;
mac->of_node = np;
+ memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+ mac->hwlro_ip_cnt = 0;
+
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
@@ -1766,21 +2305,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+ eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ if (eth->hwlro)
+ eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
eth->netdev[id]->features |= MTK_HW_FEATURES;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- err = register_netdev(eth->netdev[id]);
- if (err) {
- dev_err(eth->dev, "error bringing up device\n");
- goto free_netdev;
- }
eth->netdev[id]->irq = eth->irq[0];
- netif_info(eth, probe, eth->netdev[id],
- "mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->irq[0]);
-
return 0;
free_netdev:
@@ -1827,11 +2362,7 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->pctl);
}
- eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
- if (IS_ERR(eth->rstc)) {
- dev_err(&pdev->dev, "no eth reset found\n");
- return PTR_ERR(eth->rstc);
- }
+ eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
for (i = 0; i < 3; i++) {
eth->irq[i] = platform_get_irq(pdev, i);
@@ -1850,11 +2381,6 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
- clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
- clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
- clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
-
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
@@ -1872,7 +2398,35 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_add_mac(eth, mac_np);
if (err)
- goto err_free_dev;
+ goto err_deinit_hw;
+ }
+
+ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ err = register_netdev(eth->netdev[i]);
+ if (err) {
+ dev_err(eth->dev, "error bringing up device\n");
+ goto err_deinit_mdio;
+ }
+
+ netif_info(eth, probe, eth->netdev[i],
+ "mediatek frame engine at 0x%08lx, irq %d\n",
+ eth->netdev[i]->base_addr, eth->irq[0]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
@@ -1888,8 +2442,13 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
+err_deinit_mdio:
+ mtk_mdio_cleanup(eth);
err_free_dev:
- mtk_cleanup(eth);
+ mtk_free_dev(eth);
+err_deinit_hw:
+ mtk_hw_deinit(eth);
+
return err;
}
@@ -1905,16 +2464,12 @@ static int mtk_remove(struct platform_device *pdev)
mtk_stop(eth->netdev[i]);
}
- clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
- clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
- clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
- clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+ mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 6e1ade7a25c5..30031959d6de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -39,7 +39,21 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
+#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
@@ -50,6 +64,9 @@
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
@@ -68,10 +85,77 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+/* PDMA RX Base Pointer Register */
+#define MTK_PRX_BASE_PTR0 0x900
+#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+
+/* PDMA RX Maximum Count Register */
+#define MTK_PRX_MAX_CNT0 0x904
+#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+
+/* PDMA RX CPU Pointer Register */
+#define MTK_PRX_CRX_IDX0 0x908
+#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 0x980
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1 0x984
+#define MTK_PDMA_LRO_CTRL_DW2 0x988
+#define MTK_PDMA_LRO_CTRL_DW3 0x98c
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_MULTI_EN BIT(10)
+
+/* PDMA Reset Index Register */
+#define MTK_PDMA_RST_IDX 0xa08
+#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
+
+/* PDMA Delay Interrupt Register */
+#define MTK_PDMA_DELAY_INT 0xa0c
+
+/* PDMA Interrupt Status Register */
+#define MTK_PDMA_INT_STATUS 0xa20
+
+/* PDMA Interrupt Mask Register */
+#define MTK_PDMA_INT_MASK 0xa28
+
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
+
/* PDMA Interrupt grouping registers */
#define MTK_PDMA_INT_GRP1 0xa50
#define MTK_PDMA_INT_GRP2 0xa54
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
+#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
+#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -106,7 +190,6 @@
/* QDMA Reset Index Register */
#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_PST_DRX_IDX0 BIT(16)
/* QDMA Delay Interrupt Register */
#define MTK_QDMA_DELAY_INT 0x1A0C
@@ -119,13 +202,16 @@
/* QDMA Interrupt Status Register */
#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_RX_DONE_INT3 BIT(19)
+#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
#define MTK_RX_DONE_INT0 BIT(16)
#define MTK_TX_DONE_INT3 BIT(3)
#define MTK_TX_DONE_INT2 BIT(2)
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
-#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1)
+#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \
+ MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3)
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
@@ -227,6 +313,30 @@
MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -237,6 +347,15 @@
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -298,9 +417,15 @@ enum mtk_clks_map {
MTK_CLK_ESW,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_TRGPLL,
MTK_CLK_MAX
};
+enum mtk_dev_state {
+ MTK_HW_INIT,
+ MTK_RESETTING
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -338,6 +463,12 @@ struct mtk_tx_ring {
atomic_t free_count;
};
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0,
+ MTK_RX_FLAGS_HWLRO,
+};
+
/* struct mtk_rx_ring - This struct holds info describing a RX ring
* @dma: The descriptor ring
* @data: The memory pointed at by the ring
@@ -352,7 +483,10 @@ struct mtk_rx_ring {
dma_addr_t phys;
u16 frag_size;
u16 buf_size;
+ u16 dma_size;
+ bool calc_idx_update;
u16 calc_idx;
+ u32 crx_idx_reg;
};
/* currently no SoC has more than 2 macs */
@@ -384,12 +518,12 @@ struct mtk_rx_ring {
* @clks: clock array for all clocks required
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
+ * @state: Initialization and runtime state of the device.
*/
struct mtk_eth {
struct device *dev;
void __iomem *base;
- struct reset_control *rstc;
spinlock_t page_lock;
spinlock_t irq_lock;
struct net_device dummy_dev;
@@ -400,9 +534,10 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
+ bool hwlro;
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
@@ -412,22 +547,28 @@ struct mtk_eth {
struct mii_bus *mii_bus;
struct work_struct pending_work;
+ unsigned long state;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
* SoC
* @id: The number of the MAC
+ * @ge_mode: Interface mode kept for setup restoring
* @of_node: Our devicetree node
* @hw: Backpointer to our main data structure
* @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
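+ * @hwlro_ip: The IPv4 destination addresses used by the HW LRO rings
+ * @hwlro_ip_cnt: Number of HW LRO destination addresses currently set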
+ * @trgmii: Indicates whether the MAC uses TRGMII connected to the
+ * internal switch
*/
struct mtk_mac {
int id;
+ int ge_mode;
struct device_node *of_node;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+ bool trgmii;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f04a423ff79d..b1cef7a0f7ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -785,17 +785,23 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ int ret;
+
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
return mlx4_internal_err_ret_value(dev, op,
op_modifier);
+ down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
else
- return mlx4_cmd_poll(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+
+ up_read(&mlx4_priv(dev)->cmd.switch_sem);
+ return ret;
}
return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
@@ -1845,6 +1851,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos &&
+ vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
vp_oper->state.link_state == vp_admin->link_state &&
vp_oper->state.qos_vport == vp_admin->qos_vport)
return 0;
@@ -1903,6 +1910,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.link_state = vp_admin->link_state;
vp_oper->state.qos_vport = vp_admin->qos_vport;
@@ -1916,6 +1924,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
work->qos_vport = vp_oper->state.qos_vport;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
+ work->vlan_proto = vp_oper->state.vlan_proto;
work->priv = priv;
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
queue_work(priv->mfunc.master.comm_wq, &work->work);
@@ -1986,6 +1995,8 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
int port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
int min_port = find_first_bit(actv_ports.ports,
@@ -2000,12 +2011,26 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vp_oper->state = *vp_admin;
+ if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
+ slave_state->vst_qinq_supported) {
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+ }
+ vp_oper->state.link_state = vp_admin->link_state;
+ vp_oper->state.mac = vp_admin->mac;
+ vp_oper->state.spoofchk = vp_admin->spoofchk;
+ vp_oper->state.tx_rate = vp_admin->tx_rate;
+ vp_oper->state.qos_vport = vp_admin->qos_vport;
+ vp_oper->state.guid = vp_admin->guid;
+
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
+ vp_oper->state.default_vlan = MLX4_VGT;
+ vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
@@ -2086,6 +2111,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
+ slave_state[slave].vst_qinq_supported = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2353,6 +2379,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ s_state->vst_qinq_supported = false;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
@@ -2382,6 +2409,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
admin_vport->qos_vport =
MLX4_VPP_DEFAULT_VPORT;
oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+ admin_vport->vlan_proto = htons(ETH_P_8021Q);
+ oper_vport->vlan_proto = htons(ETH_P_8021Q);
vf_oper->vport[port].vlan_idx = NO_INDX;
vf_oper->vport[port].mac_idx = NO_INDX;
mlx4_set_random_admin_guid(dev, i, port);
@@ -2454,6 +2483,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
int flags = 0;
if (!priv->cmd.initialized) {
+ init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
@@ -2583,6 +2613,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
+ down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
@@ -2606,6 +2637,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);
return err;
}
@@ -2618,6 +2650,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
@@ -2626,6 +2659,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
kfree(priv->cmd.context);
up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
@@ -2937,10 +2971,13 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
+ __be16 proto)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *vf_admin;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_oper_state *vf_oper;
int slave;
if ((!mlx4_is_master(dev)) ||
@@ -2950,12 +2987,31 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if ((vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (proto == htons(ETH_P_8021AD) &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
+ return -EPROTONOSUPPORT;
+
+ if (proto != htons(ETH_P_8021Q) &&
+ proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ if ((proto == htons(ETH_P_8021AD)) &&
+ ((vlan == 0) || (vlan == MLX4_VGT)))
+ return -EINVAL;
+
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
+ slave_state = &priv->mfunc.master.slave_state[slave];
+ if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
+ (!slave_state->vst_qinq_supported)) {
+ mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
+ return -EPROTONOSUPPORT;
+ }
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
return -EPERM;
@@ -2965,6 +3021,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
else
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
+ vf_admin->vlan_proto = proto;
/* If rate was configured prior to VST, we saved the configured rate
* in vf_admin->rate and now, if priority supported we enforce the QoS
@@ -2973,7 +3030,12 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
vf_admin->tx_rate)
vf_admin->qos_vport = slave;
- if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
+ /* Try to activate the new VF state without a restart;
+ * this is not supported when moving to VST QinQ mode.
+ */
+ if ((proto == htons(ETH_P_8021AD) &&
+ vf_oper->state.vlan_proto != proto) ||
+ mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_info(dev,
"updating vf %d port %d config will take effect on next VF restart\n",
vf, port);
@@ -3117,6 +3179,7 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->vlan = s_info->default_vlan;
ivf->qos = s_info->default_qos;
+ ivf->vlan_proto = s_info->vlan_proto;
if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
ivf->max_tx_rate = s_info->tx_rate;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 1494997c4f7e..08fc5fc56d43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -298,7 +298,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL;
mlx4_err(mdev, "ptp_clock_register failed\n");
- } else {
+ } else if (mdev->ptp_clock) {
mlx4_info(mdev, "registered PHC clock\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fedb829276f4..7e703bed7b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2400,12 +2400,14 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}
-static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+ return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
+ vlan_proto);
}
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
@@ -2643,12 +2645,16 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (IS_ERR(prog))
return PTR_ERR(prog);
}
+ mutex_lock(&mdev->state_lock);
for (i = 0; i < priv->rx_ring_num; i++) {
- /* This xchg is paired with READ_ONCE in the fastpath */
- old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ old_prog = rcu_dereference_protected(
+ priv->rx_ring[i]->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
+ rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
+ mutex_unlock(&mdev->state_lock);
return 0;
}
@@ -2681,7 +2687,10 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
priv->xdp_ring_num);
for (i = 0; i < priv->rx_ring_num; i++) {
- old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ old_prog = rcu_dereference_protected(
+ priv->rx_ring[i]->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
+ rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
@@ -3217,6 +3226,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mlx4_is_slave(mdev->dev)) {
+ bool vlan_offload_disabled;
int phv;
err = get_phv_bit(mdev->dev, port, &phv);
@@ -3224,6 +3234,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
}
+ err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
+ &vlan_offload_disabled);
+ if (!err && vlan_offload_disabled) {
+ dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
} else {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(mdev->dev->caps.flags2 &
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2040dad8611d..f2e8beddcf44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -72,7 +72,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
}
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
frag_info->dma_dir);
- if (dma_mapping_error(priv->ddev, dma)) {
+ if (unlikely(dma_mapping_error(priv->ddev, dma))) {
put_page(page);
return -ENOMEM;
}
@@ -108,7 +108,8 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
ring_alloc[i].page_size)
continue;
- if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
+ if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
+ frag_info, gfp)))
goto out;
}
@@ -537,7 +538,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring = *pring;
struct bpf_prog *old_prog;
- old_prog = READ_ONCE(ring->xdp_prog);
+ old_prog = rcu_dereference_protected(
+ ring->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
@@ -583,7 +586,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
- if (!frags[nr].page)
+ if (unlikely(!frags[nr].page))
goto fail;
dma = be64_to_cpu(rx_desc->data[nr].addr);
@@ -623,7 +626,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
dma_addr_t dma;
skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (!skb) {
+ if (unlikely(!skb)) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
@@ -734,7 +737,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
{
__wsum csum_pseudo_hdr = 0;
- if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
+ ipv6h->nexthdr == IPPROTO_HOPOPTS))
return -1;
hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
@@ -767,7 +771,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+ if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
return -1;
#endif
return 0;
@@ -794,13 +798,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
u64 timestamp;
bool l2_tunnel;
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
return 0;
- if (budget <= 0)
+ if (unlikely(budget <= 0))
return polled;
- xdp_prog = READ_ONCE(ring->xdp_prog);
+ /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(ring->xdp_prog);
doorbell_pending = 0;
tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
@@ -858,15 +864,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop the packet, since HW loopback-ed it */
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
- rcu_read_lock();
hlist_for_each_entry_rcu(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
- ethh->h_source)) {
- rcu_read_unlock();
+ ethh->h_source))
goto next;
- }
}
- rcu_read_unlock();
}
}
@@ -902,16 +904,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (!mlx4_en_xmit_frame(frags, dev,
+ if (likely(!mlx4_en_xmit_frame(frags, dev,
length, tx_index,
- &doorbell_pending))
+ &doorbell_pending)))
goto consumed;
- break;
+ goto xdp_drop; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
- if (mlx4_en_rx_recycle(ring, frags))
+xdp_drop:
+ if (likely(mlx4_en_rx_recycle(ring, frags)))
goto consumed;
goto next;
}
@@ -1015,12 +1018,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (!skb) {
+ if (unlikely(!skb)) {
ring->dropped++;
goto next;
}
- if (unlikely(priv->validate_loopback)) {
+ if (unlikely(priv->validate_loopback)) {
validate_loopback(priv, skb);
goto next;
}
@@ -1077,6 +1080,7 @@ consumed:
}
out:
+ rcu_read_unlock();
if (doorbell_pending)
mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d728704d0c7b..f9cbc67f1694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -158,7 +158,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[31] = "Modifying loopback source checks using UPDATE_QP support",
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
- [34] = "DMFS Sniffer support (UC & MC)"
+ [34] = "DMFS Sniffer support (UC & MC)",
+ [35] = "QinQ VST mode support",
};
int i;
@@ -248,6 +249,72 @@ out:
return err;
}
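+/* Apply the slave's admin VST QinQ settings to its operational vport
+ * state, registering the new default VLAN with the HW if it changed.
+ */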
+static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+ if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &vp_oper->vlan_idx);
+ if (err) {
+ vp_oper->vlan_idx = NO_INDX;
+ mlx4_warn(&priv->dev,
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
+ (int)(vp_oper->state.default_vlan),
+ vp_oper->vlan_idx, slave, port);
+ }
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+
+ return 0;
+}
+
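+/* Activate a pending VST QinQ request for an active slave, or revert
+ * the admin state if the slave does not support VST QinQ.
+ */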
+static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ slave_state = &priv->mfunc.master.slave_state[slave];
+
+ if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
+ (!slave_state->active))
+ return 0;
+
+ if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
+ vp_oper->state.default_vlan == vp_admin->default_vlan &&
+ vp_oper->state.default_qos == vp_admin->default_qos)
+ return 0;
+
+ if (!slave_state->vst_qinq_supported) {
+ /* Warn and revert the request to enable VST QinQ mode */
+ vp_admin->vlan_proto = vp_oper->state.vlan_proto;
+ vp_admin->default_vlan = vp_oper->state.default_vlan;
+ vp_admin->default_qos = vp_oper->state.default_qos;
+
+ mlx4_warn(&priv->dev,
+ "Slave %d does not support VST QinQ mode\n", slave);
+ return 0;
+ }
+
+ err = mlx4_activate_vst_qinq(priv, slave, port);
+ return err;
+}
+
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -311,14 +378,18 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
-#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
#define QUERY_FUNC_CAP_PHV_BIT 0x40
+#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20
+
+#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30)
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
int converted_port = mlx4_slave_convert_port(
dev, slave, vhcr->in_modifier);
+ struct mlx4_vport_oper_state *vp_oper;
if (converted_port < 0)
return -EINVAL;
@@ -357,15 +428,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
- if (dev->caps.phv_bit[port]) {
- field = QUERY_FUNC_CAP_PHV_BIT;
- MLX4_PUT(outbox->buf, field,
- QUERY_FUNC_CAP_FLAGS0_OFFSET);
- }
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ err = mlx4_handle_vst_qinq(priv, slave, port);
+ if (err)
+ return err;
+
+ field = 0;
+ if (dev->caps.phv_bit[port])
+ field |= QUERY_FUNC_CAP_PHV_BIT;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
+
/* enable rdma and ethernet interfaces, new quota locations,
* and reserved lkey
*/
@@ -439,6 +519,10 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+
+ if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
+ slave_state->vst_qinq_supported = true;
+
} else
err = -EINVAL;
@@ -454,10 +538,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
u32 size, qkey;
int err = 0, quotas = 0;
u32 in_modifier;
+ u32 slave_caps;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
- in_modifier = op_modifier ? gen_or_port :
+ slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
+ in_modifier = op_modifier ? gen_or_port : slave_caps;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -612,8 +698,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
- func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+ MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
@@ -690,6 +775,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
@@ -767,12 +853,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
dev_cap->reserved_mtts = 1 << (field >> 4);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
- dev_cap->max_mrw_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
dev_cap->reserved_mrws = 1 << (field & 0xf);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
- dev_cap->max_mtt_seg = 1 << (field & 0x3f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
dev_cap->num_sys_eqs = size & 0xfff;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
@@ -857,6 +939,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
dev_cap->max_sq_desc_sz = size;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
+ if (field & 0x1)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
dev_cap->max_qp_per_mcg = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
@@ -2914,7 +2999,7 @@ int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
if (!err)
- *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
return err;
}
EXPORT_SYMBOL(get_phv_bit);
@@ -2938,6 +3023,22 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
}
EXPORT_SYMBOL(set_phv_bit);
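+/* Query FW for the per-function flag telling a slave that VLAN
+ * offloads must stay disabled (set while the PF enforces VST QinQ).
+ */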
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled)
+{
+ struct mlx4_func_cap func_cap;
+ int err;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *vlan_offload_disabled =
+ !!(func_cap.flags0 &
+ QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
+
void mlx4_replace_zero_macs(struct mlx4_dev *dev)
{
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cdbd76f10ced..5343a0599253 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -80,9 +80,7 @@ struct mlx4_dev_cap {
int max_eqs;
int num_sys_eqs;
int reserved_mtts;
- int max_mrw_sz;
int reserved_mrws;
- int max_mtt_seg;
int max_requester_per_qp;
int max_responder_per_qp;
int max_rdma_global;
@@ -152,7 +150,7 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u32 reserved_lkey;
u8 physical_port;
- u8 port_flags;
+ u8 flags0;
u8 flags1;
u64 phys_port_id;
u32 extra_flags;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c9d7fc5159f2..e4878f31e45d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,6 +46,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <net/devlink.h>
+#include <linux/rwsem.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -482,6 +483,7 @@ struct mlx4_slave_state {
u8 init_port_mask;
bool active;
bool old_vlan_api;
+ bool vst_qinq_supported;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -507,6 +509,7 @@ struct mlx4_vport_state {
u64 mac;
u16 default_vlan;
u8 default_qos;
+ __be16 vlan_proto;
u32 tx_rate;
bool spoofchk;
u32 link_state;
@@ -627,6 +630,7 @@ struct mlx4_cmd {
struct mutex slave_cmd_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct rw_semaphore switch_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -655,6 +659,7 @@ struct mlx4_vf_immed_vlan_work {
u8 qos_vport;
u16 vlan_id;
u16 orig_vlan_id;
+ __be16 vlan_proto;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9099dbd04951..a3528dd1e72e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -340,7 +340,7 @@ struct mlx4_en_rx_ring {
u8 fcs_del;
void *buf;
void *rx_info;
- struct bpf_prog *xdp_prog;
+ struct bpf_prog __rcu *xdp_prog;
struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8b81114bdc72..84d7857ccc27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -790,10 +790,22 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
- qpc->pri_path.vlan_control |=
- MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
+ /* VST QinQ should block untagged frames on TX,
+ * but the cvlan is in the payload and phv is set,
+ * so the HW sees them as untagged. Block tagged
+ * frames instead.
+ */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ } else { /* vst 802.1Q */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ }
} else { /* priority tagged */
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
@@ -802,7 +814,11 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
- qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ qpc->pri_path.fl |= MLX4_FL_SV;
+ else
+ qpc->pri_path.fl |= MLX4_FL_CV;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
@@ -5238,6 +5254,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
@@ -5266,7 +5283,12 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
- else
+ else if (work->vlan_proto == htons(ETH_P_8021AD))
+ vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ else /* vst 802.1Q */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
@@ -5311,7 +5333,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
upd_context->qp_context.pri_path.fvl_rx =
qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.fl =
- qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (work->vlan_proto == htons(ETH_P_8021AD))
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
+ else
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
upd_context->qp_context.pri_path.feup =
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.sched_queue =
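The same S-VLAN/C-VLAN flag selection is open-coded twice above, in update_vport_qp_param() and in the immediate-vlan work handler. As a reading aid, the rule reduces to the following hypothetical helper (illustrative only, not part of the patch):

	/* 802.1ad VST ("QinQ") marks the path with the service-VLAN flag,
	 * while plain 802.1Q VST keeps the legacy customer-VLAN flag.
	 */
	static u8 mlx4_vst_vlan_flag(__be16 vlan_proto)
	{
		return vlan_proto == htons(ETH_P_8021AD) ? MLX4_FL_SV
							 : MLX4_FL_CV;
	}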
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 67146624eb58..f44d089e2ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -45,15 +45,12 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- spin_lock(&srq_table->lock);
-
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+ rcu_read_unlock();
if (srq)
atomic_inc(&srq->refcount);
-
- spin_unlock(&srq_table->lock);
-
- if (!srq) {
+ else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
@@ -301,12 +298,11 @@ struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- unsigned long flags;
- spin_lock_irqsave(&srq_table->lock, flags);
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (dev->caps.num_srqs - 1));
- spin_unlock_irqrestore(&srq_table->lock, flags);
+ rcu_read_unlock();
return srq;
}
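The srq.c hunks above replace spinlock-protected radix-tree lookups with RCU read-side critical sections. A minimal sketch of the pattern, assuming insertions stay serialized on srq_table->lock and entries are only freed after an RCU grace period:

	/* Writer: tree updates still take the table lock. */
	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);

	/* Reader: lockless lookup; taking the reference inside the
	 * read-side critical section keeps the object from being freed
	 * underneath us.
	 */
	rcu_read_lock();
	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	rcu_read_unlock();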
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 05cc1effc13c..0343725d7f44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o
+ fs_counters.o rl.o lag.o dev.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c2ec01a22d55..1e639f886021 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,11 +294,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DEALLOC_PD:
case MLX5_CMD_OP_DEALLOC_UAR:
- case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ case MLX5_CMD_OP_DETACH_FROM_MCG:
case MLX5_CMD_OP_DEALLOC_XRCD:
case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_DESTROY_LAG:
+ case MLX5_CMD_OP_DESTROY_VPORT_LAG:
case MLX5_CMD_OP_DESTROY_TIR:
case MLX5_CMD_OP_DESTROY_SQ:
case MLX5_CMD_OP_DESTROY_RQ:
@@ -315,6 +317,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+ case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -389,6 +392,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_LAG:
+ case MLX5_CMD_OP_MODIFY_LAG:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_CREATE_VPORT_LAG:
case MLX5_CMD_OP_CREATE_TIR:
case MLX5_CMD_OP_MODIFY_TIR:
case MLX5_CMD_OP_QUERY_TIR:
@@ -416,6 +423,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -504,7 +512,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
MLX5_COMMAND_STR_CASE(ACCESS_REG);
MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
- MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+ MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
MLX5_COMMAND_STR_CASE(MAD_IFC);
MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
@@ -526,6 +534,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(CREATE_LAG);
+ MLX5_COMMAND_STR_CASE(MODIFY_LAG);
+ MLX5_COMMAND_STR_CASE(QUERY_LAG);
+ MLX5_COMMAND_STR_CASE(DESTROY_LAG);
+ MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
+ MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
MLX5_COMMAND_STR_CASE(CREATE_TIR);
MLX5_COMMAND_STR_CASE(MODIFY_TIR);
MLX5_COMMAND_STR_CASE(DESTROY_TIR);
@@ -564,15 +578,130 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
default: return "unknown command opcode";
}
}
+static const char *cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK:
+ return "OK";
+ case MLX5_CMD_STAT_INT_ERR:
+ return "internal error";
+ case MLX5_CMD_STAT_BAD_OP_ERR:
+ return "bad operation";
+ case MLX5_CMD_STAT_BAD_PARAM_ERR:
+ return "bad parameter";
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
+ return "bad system state";
+ case MLX5_CMD_STAT_BAD_RES_ERR:
+ return "bad resource";
+ case MLX5_CMD_STAT_RES_BUSY:
+ return "resource busy";
+ case MLX5_CMD_STAT_LIM_ERR:
+ return "limits exceeded";
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
+ return "bad resource state";
+ case MLX5_CMD_STAT_IX_ERR:
+ return "bad index";
+ case MLX5_CMD_STAT_NO_RES_ERR:
+ return "no resources";
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
+ return "bad input length";
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
+ return "bad output length";
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
+ return "bad QP state";
+ case MLX5_CMD_STAT_BAD_PKT_ERR:
+ return "bad packet (discarded)";
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
+ return "bad size too many outstanding CQEs";
+ default:
+ return "unknown status";
+ }
+}
+
+static int cmd_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK: return 0;
+ case MLX5_CMD_STAT_INT_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
+ default: return -EIO;
+ }
+}
+
+struct mlx5_ifc_mbox_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+{
+ *status = MLX5_GET(mbox_out, out, status);
+ *syndrome = MLX5_GET(mbox_out, out, syndrome);
+}
+
+static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+{
+ u32 syndrome;
+ u8 status;
+ u16 opcode;
+ u16 op_mod;
+
+ mlx5_cmd_mbox_status(out, &status, &syndrome);
+ if (!status)
+ return 0;
+
+ opcode = MLX5_GET(mbox_in, in, opcode);
+ op_mod = MLX5_GET(mbox_in, in, op_mod);
+
+ mlx5_core_err(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+ mlx5_command_str(opcode),
+ opcode, op_mod,
+ cmd_status_str(status),
+ status,
+ syndrome);
+
+ return cmd_status_to_err(status);
+}
+
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
- u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+ u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
int data_only;
u32 offset = 0;
@@ -622,9 +751,7 @@ static void dump_command(struct mlx5_core_dev *dev,
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
+ return MLX5_GET(mbox_in, in->first.data, opcode);
}
static void cb_timeout_handler(struct work_struct *work)
@@ -762,16 +889,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
-static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
-{
- return &out->syndrome;
-}
-
-static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
-{
- return &out->status;
-}
-
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -820,7 +937,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ op = MLX5_GET(mbox_in, in->first.data, opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
@@ -1035,7 +1152,6 @@ static ssize_t data_write(struct file *filp, const char __user *buf,
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
void *ptr;
- int err;
if (*pos != 0)
return -EINVAL;
@@ -1043,25 +1159,15 @@ static ssize_t data_write(struct file *filp, const char __user *buf,
kfree(dbg->in_msg);
dbg->in_msg = NULL;
dbg->inlen = 0;
-
- ptr = kzalloc(count, GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- if (copy_from_user(ptr, buf, count)) {
- err = -EFAULT;
- goto out;
- }
+ ptr = memdup_user(buf, count);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
dbg->in_msg = ptr;
dbg->inlen = count;
*pos = count;
return count;
-
-out:
- kfree(ptr);
- return err;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
@@ -1321,11 +1427,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
callback = ent->callback;
context = ent->context;
err = ent->ret;
- if (!err)
+ if (!err) {
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
+ err = err ? err : mlx5_cmd_check(dev,
+ ent->in->first.data,
+ ent->uout);
+ }
+
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
@@ -1377,14 +1488,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
-static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
-{
- return be16_to_cpu(in->opcode);
-}
-
-static int is_manage_pages(struct mlx5_inbox_hdr *in)
+static int is_manage_pages(void *in)
{
- return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+ return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
@@ -1401,9 +1507,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
- *get_synd_ptr(out) = cpu_to_be32(drv_synd);
- *get_status_ptr(out) = status;
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, drv_synd);
return err;
}
@@ -1457,7 +1565,10 @@ out_in:
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ int err;
+
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
@@ -1694,96 +1805,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
-
-static const char *cmd_status_str(u8 status)
-{
- switch (status) {
- case MLX5_CMD_STAT_OK:
- return "OK";
- case MLX5_CMD_STAT_INT_ERR:
- return "internal error";
- case MLX5_CMD_STAT_BAD_OP_ERR:
- return "bad operation";
- case MLX5_CMD_STAT_BAD_PARAM_ERR:
- return "bad parameter";
- case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
- return "bad system state";
- case MLX5_CMD_STAT_BAD_RES_ERR:
- return "bad resource";
- case MLX5_CMD_STAT_RES_BUSY:
- return "resource busy";
- case MLX5_CMD_STAT_LIM_ERR:
- return "limits exceeded";
- case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
- return "bad resource state";
- case MLX5_CMD_STAT_IX_ERR:
- return "bad index";
- case MLX5_CMD_STAT_NO_RES_ERR:
- return "no resources";
- case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
- return "bad input length";
- case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
- return "bad output length";
- case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
- return "bad QP state";
- case MLX5_CMD_STAT_BAD_PKT_ERR:
- return "bad packet (discarded)";
- case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
- return "bad size too many outstanding CQEs";
- default:
- return "unknown status";
- }
-}
-
-static int cmd_status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_STAT_OK: return 0;
- case MLX5_CMD_STAT_INT_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
- case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
- case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
- case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
- case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
- case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
- case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
- default: return -EIO;
- }
-}
-
-/* this will be available till all the commands use set/get macros */
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
-{
- if (!hdr->status)
- return 0;
-
- pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
- cmd_status_str(hdr->status), hdr->status,
- be32_to_cpu(hdr->syndrome));
-
- return cmd_status_to_err(hdr->status);
-}
-
-int mlx5_cmd_status_to_err_v2(void *ptr)
-{
- u32 syndrome;
- u8 status;
-
- status = be32_to_cpu(*(__be32 *)ptr) >> 24;
- if (!status)
- return 0;
-
- syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
-
- pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
- cmd_status_str(status), status, syndrome);
-
- return cmd_status_to_err(status);
-}
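With mlx5_cmd_check() now folded into mlx5_cmd_exec(), command callers collapse to the pattern below; this sketch shows the convention the later cq.c hunks adopt (cqn stands in for whatever resource id the command takes):

	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	int err;

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cqn);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	/* err already reflects both transport failures and a bad
	 * firmware status/syndrome; no manual header inspection needed.
	 */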
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 873a631ad155..32d4af9b594d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
complete(&cq->free);
}
-
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen)
+ u32 *in, int inlen)
{
- int err;
struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_create_cq_mbox_out out;
- struct mlx5_destroy_cq_mbox_in din;
- struct mlx5_destroy_cq_mbox_out dout;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+ u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn);
struct mlx5_eq *eq;
+ int err;
eq = mlx5_eqn2eq(dev, eqn);
if (IS_ERR(eq))
return PTR_ERR(eq);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
- memset(&out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
+ cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
@@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+ memset(din, 0, sizeof(din));
+ memset(dout, 0, sizeof(dout));
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+ mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
@@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_destroy_cq_mbox_in in;
- struct mlx5_destroy_cq_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
struct mlx5_core_cq *tmp;
int err;
@@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
- in.cqn = cpu_to_be32(cq->cqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
@@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out)
+ u32 *out, int outlen)
{
- struct mlx5_query_cq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
- in.cqn = cpu_to_be32(cq->cqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
+ u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
- return err;
+ MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+ MLX5_SET(query_cq_in, in, cqn, cq->cqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
-
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz)
+ u32 *in, int inlen)
{
- struct mlx5_modify_cq_mbox_out out;
- int err;
-
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
- err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
- if (err)
- return err;
+ u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return 0;
+ MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
@@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
- struct mlx5_modify_cq_mbox_in in;
-
- memset(&in, 0, sizeof(in));
-
- in.cqn = cpu_to_be32(cq->cqn);
- in.ctx.cq_period = cpu_to_be16(cq_period);
- in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
- in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
- MLX5_CQ_MODIFY_COUNT);
-
- return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period, cq_period);
+ MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
+EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 5210d92e6bc7..e94a9532e218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
- struct mlx5_query_qp_mbox_out *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
struct mlx5_qp_context *ctx;
u64 param = 0;
+ u32 *out;
int err;
int no_sq;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
- err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
+ err = mlx5_core_qp_query(dev, qp, out, outlen);
if (err) {
- mlx5_core_warn(dev, "failed to query qp\n");
+ mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
goto out;
}
*is_str = 0;
- ctx = &out->ctx;
+
+ /* FIXME: use MLX5_GET rather than the manual mlx5_qp_context struct */
+ ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
+
switch (index) {
case QP_PID:
param = qp->pid;
@@ -358,32 +362,32 @@ out:
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
- struct mlx5_query_eq_mbox_out *out;
- struct mlx5_eq_context *ctx;
+ int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
u64 param = 0;
+ void *ctx;
+ u32 *out;
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
- ctx = &out->ctx;
-
- err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
+ err = mlx5_core_eq_query(dev, eq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
}
+ ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
switch (index) {
case EQ_NUM_EQES:
- param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+ param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
break;
case EQ_INTR:
- param = ctx->intr;
+ param = MLX5_GET(eqc, ctx, intr);
break;
case EQ_LOG_PG_SZ:
- param = (ctx->log_page_size & 0x1f) + 12;
+ param = MLX5_GET(eqc, ctx, log_page_size) + 12;
break;
}
@@ -395,37 +399,37 @@ out:
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index)
{
- struct mlx5_query_cq_mbox_out *out;
- struct mlx5_cq_context *ctx;
+ int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
u64 param = 0;
+ void *ctx;
+ u32 *out;
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = mlx5_vzalloc(outlen);
if (!out)
return param;
- ctx = &out->ctx;
-
- err = mlx5_core_query_cq(dev, cq, out);
+ err = mlx5_core_query_cq(dev, cq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
}
+ ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
switch (index) {
case CQ_PID:
param = cq->pid;
break;
case CQ_NUM_CQES:
- param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+ param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
break;
case CQ_LOG_PG_SZ:
- param = (ctx->log_pg_sz & 0x1f) + 12;
+ param = MLX5_GET(cqc, ctx, log_page_size);
break;
}
out:
- kfree(out);
+ kvfree(out);
return param;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
new file mode 100644
index 000000000000..a9dbc28f6b97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(mlx5_dev_list);
+/* serializes updates to intf_list and mlx5_dev_list */
+static DEFINE_MUTEX(mlx5_intf_mutex);
+
+struct mlx5_device_context {
+ struct list_head list;
+ struct mlx5_interface *intf;
+ void *context;
+ unsigned long state;
+};
+
+enum {
+ MLX5_INTERFACE_ADDED,
+ MLX5_INTERFACE_ATTACHED,
+};
+
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ if (!mlx5_lag_intf_add(intf, priv))
+ return;
+
+ dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ if (intf->attach)
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
+ struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf)
+ return dev_ctx;
+ return NULL;
+}
+
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ intf->remove(dev, dev_ctx->context);
+
+ kfree(dev_ctx);
+}
+
+static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ if (intf->attach) {
+ if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+ return;
+ intf->attach(dev, dev_ctx->context);
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ } else {
+ if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ return;
+ dev_ctx->context = intf->add(dev);
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ }
+}
+
+void mlx5_attach_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_attach_interface(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ if (intf->detach) {
+ if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+ return;
+ intf->detach(dev, dev_ctx->context);
+ clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ } else {
+ if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ return;
+ intf->remove(dev, dev_ctx->context);
+ clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ }
+}
+
+void mlx5_detach_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_detach_interface(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+bool mlx5_device_registered(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv;
+ bool found = false;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ if (priv == &dev->priv)
+ found = true;
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return found;
+}
+
+int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_add_tail(&priv->dev_list, &mlx5_dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return 0;
+}
+
+void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+/* Must be called with intf_mutex held */
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+ struct mlx5_interface *intf;
+
+ list_for_each_entry(intf, &intf_list, list)
+ if (intf->protocol == protocol) {
+ mlx5_add_device(intf, &dev->priv);
+ break;
+ }
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+ struct mlx5_interface *intf;
+
+ list_for_each_entry(intf, &intf_list, list)
+ if (intf->protocol == protocol) {
+ mlx5_remove_device(intf, &dev->priv);
+ break;
+ }
+}
+
+static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+{
+ return (u16)((dev->pdev->bus->number << 8) |
+ PCI_SLOT(dev->pdev->devfn));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+{
+ u16 pci_id = mlx5_gen_pci_id(dev);
+ struct mlx5_core_dev *res = NULL;
+ struct mlx5_core_dev *tmp_dev;
+ struct mlx5_priv *priv;
+
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
+ tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+ if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
+ res = tmp_dev;
+ break;
+ }
+ }
+
+ return res;
+}
+
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->event)
+ dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+void mlx5_dev_list_lock(void)
+{
+ mutex_lock(&mlx5_intf_mutex);
+}
+
+void mlx5_dev_list_unlock(void)
+{
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_dev_list_trylock(void)
+{
+ return mutex_trylock(&mlx5_intf_mutex);
+}
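A hypothetical consumer of the interface API factored out into dev.c above; only add/remove are mandatory, while attach/detach are the optional lightweight hooks driven by mlx5_attach_device()/mlx5_detach_device() (the protocol constant is assumed to match the existing enum in mlx5/driver.h):

	static void *dummy_add(struct mlx5_core_dev *dev)
	{
		/* per-device context handed back to remove() later */
		return dev;
	}

	static void dummy_remove(struct mlx5_core_dev *dev, void *context)
	{
	}

	static struct mlx5_interface dummy_intf = {
		.add      = dummy_add,
		.remove   = dummy_remove,
		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	};

	/* mlx5_register_interface(&dummy_intf) then invokes ->add() for
	 * every probed device; mlx5_unregister_interface() undoes it.
	 */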
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bf722aa88cf0..460363b66cb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -62,12 +62,14 @@
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+#define MLX5_RX_HEADROOM NET_SKB_PAD
+
#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_WQE_SZ 17
+#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
@@ -99,6 +101,18 @@
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_ICOSQ_MAX_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_IHS_DS_COUNT \
+ DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT \
+ (MLX5E_XDP_IHS_DS_COUNT + \
+ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_WQEBBS \
+ DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
+
#define MLX5E_NUM_MAIN_GROUPS 9
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
@@ -287,29 +301,53 @@ struct mlx5e_rx_am { /* Adaptive Moderation */
u8 tired;
};
+/* A single cache unit can serve one napi call (for a non-striding rq)
+ * or one MPWQE (for a striding rq).
+ */
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
+struct mlx5e_page_cache {
+ u32 head;
+ u32 tail;
+ struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
- u32 wqe_sz;
- struct sk_buff **skb;
- struct mlx5e_mpw_info *wqe_info;
+
+ union {
+ struct mlx5e_dma_info *dma_info;
+ struct {
+ struct mlx5e_mpw_info *info;
+ void *mtt_no_align;
+ u32 mtt_offset;
+ } mpwqe;
+ };
+ struct {
+ u8 page_order;
+ u32 wqe_sz; /* wqe data buffer size */
+ u8 map_dir; /* dma map direction */
+ } buff;
__be32 mkey_be;
- __be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
+ struct mlx5e_page_cache page_cache;
+
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
- u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct bpf_prog *xdp_prog;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -323,32 +361,15 @@ struct mlx5e_rq {
struct mlx5e_umr_dma_info {
__be64 *mtt;
- __be64 *mtt_no_align;
dma_addr_t mtt_addr;
- struct mlx5e_dma_info *dma_info;
+ struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+ struct mlx5e_umr_wqe wqe;
};
struct mlx5e_mpw_info {
- union {
- struct mlx5e_dma_info dma_info;
- struct mlx5e_umr_dma_info umr;
- };
+ struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-
- void (*dma_pre_sync)(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len);
- void (*add_skb_frag)(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset, u32 len);
- void (*copy_skb_header)(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen);
- void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
};
struct mlx5e_tx_wqe_info {
@@ -373,11 +394,17 @@ enum {
MLX5E_SQ_STATE_BF_ENABLE,
};
-struct mlx5e_ico_wqe_info {
+struct mlx5e_sq_wqe_info {
u8 opcode;
u8 num_wqebbs;
};
+enum mlx5e_sq_type {
+ MLX5E_SQ_TXQ,
+ MLX5E_SQ_ICO,
+ MLX5E_SQ_XDP
+};
+
struct mlx5e_sq {
/* data path */
@@ -395,10 +422,20 @@ struct mlx5e_sq {
struct mlx5e_cq cq;
- /* pointers to per packet info: write@xmit, read@completion */
- struct sk_buff **skb;
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
+ /* pointers to per tx element info: write@xmit, read@completion */
+ union {
+ struct {
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } txq;
+ struct mlx5e_sq_wqe_info *ico_wqe;
+ struct {
+ struct mlx5e_sq_wqe_info *wqe_info;
+ struct mlx5e_dma_info *di;
+ bool doorbell;
+ } xdp;
+ } db;
/* read only */
struct mlx5_wq_cyc wq;
@@ -420,8 +457,8 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
- struct mlx5e_ico_wqe_info *ico_wqe_info;
u32 rate_limit;
+ u8 type;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -437,8 +474,10 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
+ struct mlx5e_sq xdp_sq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_sq icosq; /* internal control operations */
+ bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -620,6 +659,7 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct bpf_prog *xdp_prog;
/* priv data path fields - end */
unsigned long state;
@@ -654,40 +694,6 @@ struct mlx5e_priv {
void *ppriv;
};
-enum mlx5e_link_mode {
- MLX5E_1000BASE_CX_SGMII = 0,
- MLX5E_1000BASE_KX = 1,
- MLX5E_10GBASE_CX4 = 2,
- MLX5E_10GBASE_KX4 = 3,
- MLX5E_10GBASE_KR = 4,
- MLX5E_20GBASE_KR2 = 5,
- MLX5E_40GBASE_CR4 = 6,
- MLX5E_40GBASE_KR4 = 7,
- MLX5E_56GBASE_R4 = 8,
- MLX5E_10GBASE_CR = 12,
- MLX5E_10GBASE_SR = 13,
- MLX5E_10GBASE_ER = 14,
- MLX5E_40GBASE_SR4 = 15,
- MLX5E_40GBASE_LR4 = 16,
- MLX5E_50GBASE_SR2 = 18,
- MLX5E_100GBASE_CR4 = 20,
- MLX5E_100GBASE_SR4 = 21,
- MLX5E_100GBASE_KR4 = 22,
- MLX5E_100GBASE_LR4 = 23,
- MLX5E_100BASE_TX = 24,
- MLX5E_1000BASE_T = 25,
- MLX5E_10GBASE_T = 26,
- MLX5E_25GBASE_CR = 27,
- MLX5E_25GBASE_KR = 28,
- MLX5E_25GBASE_SR = 29,
- MLX5E_50GBASE_CR2 = 30,
- MLX5E_50GBASE_KR2 = 31,
- MLX5E_LINK_MODES_NUMBER,
-};
-
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-
-
void mlx5e_build_ptys2ethtool_map(void);
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
@@ -700,30 +706,19 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
-void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- u16 byte_cnt,
- struct mlx5e_mpw_info *wi,
- struct sk_buff *skb);
-void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- u16 byte_cnt,
- struct mlx5e_mpw_info *wi,
- struct sk_buff *skb);
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi);
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi);
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_rx_am(struct mlx5e_rq *rq);
@@ -810,6 +805,12 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
+static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+ return rq->mpwqe.mtt_offset +
+ wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return min_t(int, mdev->priv.eq_table.num_comp_vectors,
@@ -868,6 +869,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
@@ -878,9 +880,12 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile, void *ppriv);
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
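For reference, the XDP TX sizing macros added above work out as follows, assuming MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQE_BB = 64 and a 32-byte struct mlx5e_tx_wqe (ctrl segment plus eth segment):

	/*
	 *   MLX5E_XDP_MIN_INLINE   = ETH_HLEN + VLAN_HLEN        = 18
	 *   MLX5E_XDP_IHS_DS_COUNT = DIV_ROUND_UP(18 - 2, 16)    = 1
	 *   MLX5E_XDP_TX_DS_COUNT  = 1 + 32/16 + 1 (SG DS)       = 4
	 *   MLX5E_XDP_TX_WQEBBS    = DIV_ROUND_UP(4, 64/16)      = 1
	 *
	 * i.e. every XDP_TX descriptor fits in a single 64-byte WQEBB.
	 */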
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 847a8f3ac2b2..13dc388667b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -273,7 +273,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
- if (IS_ERR_OR_NULL(tstamp->ptp)) {
+ if (IS_ERR(tstamp->ptp)) {
mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
PTR_ERR(tstamp->ptp));
tstamp->ptp = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 9cce153e1035..029e856f72a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_core_mkey *mkey)
{
- struct mlx5_create_mkey_mbox_in *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
int err;
- in = mlx5_vzalloc(sizeof(*in));
+ in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- in->seg.flags = MLX5_PERM_LOCAL_WRITE |
- MLX5_PERM_LOCAL_READ |
- MLX5_ACCESS_MODE_PA;
- in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
- err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
- NULL);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
- kvfree(in);
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+ kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 7a346bb2ed00..27ff401cec20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -805,7 +805,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 eth_proto_cap;
u32 eth_proto_admin;
u32 eth_proto_lp;
@@ -815,7 +815,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
-
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1587a9fd5724..36fbc6b21a33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -294,6 +294,36 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
return 0;
}
+static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ }
+
+ if (priv->fs.vlan.filter_disabled &&
+ !(priv->netdev->flags & IFF_PROMISC))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ }
+
+ if (priv->fs.vlan.filter_disabled &&
+ !(priv->netdev->flags & IFF_PROMISC))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -1024,14 +1054,10 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- goto err_destroy_vlan_flow_groups;
+ mlx5e_add_vlan_rules(priv);
return 0;
-err_destroy_vlan_flow_groups:
- mlx5e_destroy_groups(ft);
err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
@@ -1043,6 +1069,7 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
@@ -1100,7 +1127,6 @@ err_destroy_arfs_tables:
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2459c7f3db8d..7eaf38020a8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -50,7 +51,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
u16 max_inline;
u8 min_inline_mode;
- bool icosq;
+ enum mlx5e_sq_type type;
};
struct mlx5e_cq_param {
@@ -63,12 +64,55 @@ struct mlx5e_cq_param {
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
struct mlx5e_cq_param icosq_cq;
};
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+{
+ priv->params.rq_wq_type = rq_type;
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ mlx5_core_info(priv->mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+}
+
+static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+{
+ u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
+ !priv->xdp_prog ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+ mlx5e_set_rq_type_params(priv, rq_type);
+}
+
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -136,12 +180,18 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
@@ -174,18 +224,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_core_dev *mdev = priv->mdev;
- memset(in, 0, sizeof(in));
-
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
memset(out, 0, outlen);
-
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
@@ -298,6 +345,117 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the mtt array, we allocate
+ * a little more.
+ */
+ return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+ MLX5_UMR_MTT_ALIGNMENT);
+}
+
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
+ struct mlx5e_umr_wqe *wqe, u16 ix)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_data_seg *dseg = &wqe->data;
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->imm = rq->mkey_be;
+
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->klm_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ ucseg->bsf_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+ dseg->lkey = sq->mkey_be;
+ dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
+ struct mlx5e_channel *c)
+{
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
+ int i;
+
+ rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->mpwqe.info)
+ goto err_out;
+
+ /* We allocate more than mtt_sz as we will align the pointer */
+ rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (unlikely(!rq->mpwqe.mtt_no_align))
+ goto err_free_wqe_info;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
+ MLX5_UMR_ALIGN);
+ wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
+ goto err_unmap_mtts;
+
+ mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
+ }
+
+ return 0;
+
+err_unmap_mtts:
+ while (--i >= 0) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
+ PCI_DMA_TODEVICE);
+ }
+ kfree(rq->mpwqe.mtt_no_align);
+err_free_wqe_info:
+ kfree(rq->mpwqe.info);
+
+err_out:
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
+{
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int i;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
+ PCI_DMA_TODEVICE);
+ }
+ kfree(rq->mpwqe.mtt_no_align);
+ kfree(rq->mpwqe.info);
+}
+
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+ if (rep && rep->vport != FDB_UPLINK_VPORT)
+ return true;
+
+ return false;
+}
+
static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -307,6 +465,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
+ u32 frag_sz;
+ int npages;
int wq_sz;
int err;
int i;
@@ -322,63 +482,92 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->wq_type = priv->params.rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->tstamp = &priv->tstamp;
+ rq->channel = c;
+ rq->ix = c->ix;
+ rq->priv = c->priv;
+ rq->xdp_prog = priv->xdp_prog;
+
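+ /* XDP_TX transmits straight out of RX buffers, so the buffers
+ * must be mapped bidirectionally when an XDP program is attached.
+ */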
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ if (rq->xdp_prog)
+ rq->buff.map_dir = DMA_BIDIRECTIONAL;
+
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->wqe_info) {
- err = -ENOMEM;
+ if (mlx5e_is_vf_vport_rep(priv)) {
+ err = -EINVAL;
goto err_rq_wq_destroy;
}
+
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe_mtt_offset = c->ix *
+ rq->mpwqe.mtt_offset = c->ix *
MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
- rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
- byte_count = rq->wqe_sz;
+
+ rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->buff.wqe_sz;
+ rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+ err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+ if (err)
+ goto err_rq_wq_destroy;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
+ rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->dma_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
+ if (mlx5e_is_vf_vport_rep(priv))
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
+ else
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->wqe_sz = (priv->params.lro_en) ?
+ rq->buff.wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
- byte_count = rq->wqe_sz;
+ byte_count = rq->buff.wqe_sz;
+
+ /* calculate the page order required to fit headroom, packet data and skb_shared_info */
+ frag_sz = MLX5_RX_HEADROOM +
+ byte_count /* packet data */ +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frag_sz = SKB_DATA_ALIGN(frag_sz);
+
+ npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+ rq->buff.page_order = order_base_2(npages);
+
byte_count |= MLX5_HW_START_PADDING;
+ rq->mkey_be = c->mkey_be;
}
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
wqe->data.byte_count = cpu_to_be32(byte_count);
+ wqe->data.lkey = rq->mkey_be;
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
rq->am.mode = priv->params.rx_cq_period_mode;
- rq->wq_type = priv->params.rq_wq_type;
- rq->pdev = c->pdev;
- rq->netdev = c->netdev;
- rq->tstamp = &priv->tstamp;
- rq->channel = c;
- rq->ix = c->ix;
- rq->priv = c->priv;
- rq->mkey_be = c->mkey_be;
- rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+ rq->page_cache.head = 0;
+ rq->page_cache.tail = 0;
+
+ if (rq->xdp_prog)
+ bpf_prog_add(rq->xdp_prog, 1);
return 0;
@@ -390,14 +579,25 @@ err_rq_wq_destroy:
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
+ int i;
+
+ if (rq->xdp_prog)
+ bpf_prog_put(rq->xdp_prog);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->wqe_info);
+ mlx5e_rq_free_mpwqe_info(rq);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->skb);
+ kfree(rq->dma_info);
}
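+ /* Drain the page cache, releasing any pages it still holds back
+ * to the page allocator (recycle == false).
+ */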
+ for (i = rq->page_cache.head; i != rq->page_cache.tail;
+ i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
+ struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
+
+ mlx5e_page_release(rq, dma_info, false);
+ }
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -488,7 +688,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
- MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
@@ -530,7 +731,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
- mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
@@ -565,8 +766,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -590,26 +791,65 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_destroy_rq(rq);
}
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
- kfree(sq->wqe_info);
- kfree(sq->dma_fifo);
- kfree(sq->skb);
+ kfree(sq->db.xdp.di);
+ kfree(sq->db.xdp.wqe_info);
}
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
- sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
- numa);
- sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
- numa);
+ sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+ GFP_KERNEL, numa);
+ sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
+ mlx5e_free_sq_xdp_db(sq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.ico_wqe);
+}
- if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
- mlx5e_free_sq_db(sq);
+static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+{
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+ sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.ico_wqe)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.txq.wqe_info);
+ kfree(sq->db.txq.dma_fifo);
+ kfree(sq->db.txq.skb);
+}
+
+static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
+ GFP_KERNEL, numa);
+ sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
+ GFP_KERNEL, numa);
+ sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
+ GFP_KERNEL, numa);
+ if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
+ mlx5e_free_sq_txq_db(sq);
return -ENOMEM;
}
@@ -618,6 +858,46 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
return 0;
}
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_sq_txq_db(sq);
+ break;
+ case MLX5E_SQ_ICO:
+ mlx5e_free_sq_ico_db(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_sq_xdp_db(sq);
+ break;
+ }
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ return mlx5e_alloc_sq_txq_db(sq, numa);
+ case MLX5E_SQ_ICO:
+ return mlx5e_alloc_sq_ico_db(sq, numa);
+ case MLX5E_SQ_XDP:
+ return mlx5e_alloc_sq_xdp_db(sq, numa);
+ }
+
+ return 0;
+}
+
+static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+{
+ switch (sq_type) {
+ case MLX5E_SQ_ICO:
+ return MLX5E_ICOSQ_MAX_WQEBBS;
+ case MLX5E_SQ_XDP:
+ return MLX5E_XDP_TX_WQEBBS;
+ }
+ return MLX5_SEND_WQE_MAX_WQEBBS;
+}
+
static int mlx5e_create_sq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
@@ -630,6 +910,13 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
+ sq->type = param->type;
+ sq->pdev = c->pdev;
+ sq->tstamp = &priv->tstamp;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+
err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -658,18 +945,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- if (param->icosq) {
- u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-
- sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
- wq_sz,
- GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!sq->ico_wqe_info) {
- err = -ENOMEM;
- goto err_free_sq_db;
- }
- } else {
+ if (sq->type == MLX5E_SQ_TXQ) {
int txq_ix;
txq_ix = c->ix + tc * priv->params.num_channels;
@@ -677,19 +953,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
priv->txq_to_sq_map[txq_ix] = sq;
}
- sq->pdev = c->pdev;
- sq->tstamp = &priv->tstamp;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
return 0;
-err_free_sq_db:
- mlx5e_free_sq_db(sq);
-
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -704,7 +972,6 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
- kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -733,11 +1000,12 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
+ 0 : priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -852,12 +1120,14 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
netif_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
- if (mlx5e_sq_has_room_for(sq, 1))
+ if (mlx5e_sq_has_room_for(sq, 1)) {
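+ /* Leave a NULL skb in the slot so the completion path
+ * recognizes this WQE as a NOP.
+ */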
+ sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
mlx5e_send_nop(sq, true);
+ }
}
mlx5e_disable_sq(sq);
- mlx5e_free_tx_descs(sq);
+ mlx5e_free_sq_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -1218,14 +1488,31 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}
}
+ if (priv->xdp_prog) {
+ /* XDP SQ CQ params are the same as the normal TXQ SQ CQ params */
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+ priv->params.tx_cq_moderation);
+ if (err)
+ goto err_close_sqs;
+
+ err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
+ if (err) {
+ mlx5e_close_cq(&c->xdp_sq.cq);
+ goto err_close_sqs;
+ }
+ }
+
+ c->xdp = !!priv->xdp_prog;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
- goto err_close_sqs;
+ goto err_close_xdp_sq;
netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
+err_close_xdp_sq:
+ mlx5e_close_sq(&c->xdp_sq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1254,9 +1541,13 @@ err_napi_del:
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
+ if (c->xdp)
+ mlx5e_close_sq(&c->xdp_sq);
mlx5e_close_sqs(c);
mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->xdp_sq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
@@ -1330,6 +1621,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
param->max_inline = priv->params.tx_max_inline;
param->min_inline_mode = priv->params.tx_min_inline_mode;
+ param->type = MLX5E_SQ_TXQ;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1403,7 +1695,22 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- param->icosq = true;
+ param->type = MLX5E_SQ_ICO;
+}
+
+static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
+ param->max_inline = priv->params.tx_max_inline;
+ /* For now, XDP SQs support only L2 inline mode */
+ param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+ param->type = MLX5E_SQ_XDP;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
@@ -1412,6 +1719,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_chan
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
@@ -1885,6 +2193,9 @@ int mlx5e_close(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ if (!netif_device_present(netdev))
+ return -ENODEV;
+
mutex_lock(&priv->state_lock);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -1999,14 +2310,15 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- memset(in, 0, sizeof(in));
-
MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
+ if (mlx5_lag_is_lacp_owner(mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
@@ -2605,11 +2917,15 @@ static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
-static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
vlan, qos);
}
@@ -2786,6 +3102,106 @@ static void mlx5e_tx_timeout(struct net_device *dev)
schedule_work(&priv->tx_timeout_work);
}
+static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ int err = 0;
+ bool reset, was_opened;
+ int i;
+
+ mutex_lock(&priv->state_lock);
+
+ if ((netdev->features & NETIF_F_LRO) && prog) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ /* A full reset (close/open of the channels) is needed only when
+ * XDP toggles on or off, since the RQ type changes; swapping one
+ * program for another does not require it.
+ */
+ reset = (!priv->xdp_prog || !prog);
+
+ if (was_opened && reset)
+ mlx5e_close_locked(netdev);
+
+ /* exchange programs */
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (prog)
+ bpf_prog_add(prog, 1);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (reset) /* change RQ type according to priv->xdp_prog */
+ mlx5e_set_rq_priv_params(priv);
+
+ if (was_opened && reset)
+ mlx5e_open_locked(netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
+ goto unlock;
+
+ /* When exchanging programs without a reset, update the program
+ * reference counts on behalf of the channels' RQs here.
+ */
+ bpf_prog_add(prog, priv->params.num_channels);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ struct mlx5e_channel *c = priv->channel[i];
+
+ set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ napi_synchronize(&c->napi);
+ /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
+
+ old_prog = xchg(&c->rq.xdp_prog, prog);
+
+ clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ /* napi_schedule in case we have missed anything */
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+ napi_schedule(&c->napi);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static bool mlx5e_xdp_attached(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return !!priv->xdp_prog;
+}
+
+static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mlx5e_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = mlx5e_xdp_attached(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
+ * re-enabling interrupts.
+ */
+static void mlx5e_netpoll(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ napi_schedule(&priv->channel[i]->napi);
+}
+#endif
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -2805,6 +3221,10 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx5e_netpoll,
+#endif
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2836,6 +3256,10 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx5e_netpoll,
+#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2910,13 +3334,6 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
indirection_rqt[i] = i % num_channels;
}
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
-}
-
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
enum pcie_link_width width;
@@ -2996,11 +3413,13 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->params.log_sq_size =
- MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
priv->params.rx_cqe_compress_admin = false;
@@ -3013,33 +3432,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.rx_cqe_compress_admin =
cqe_compress_heuristic(link_speed, pci_bw);
}
-
priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
- switch (priv->params.rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz =
- priv->params.rx_cqe_compress ?
- MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
- MLX5_MPWRQ_LOG_STRIDE_SIZE;
- priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- priv->params.mpwqe_log_stride_sz;
+ mlx5e_set_rq_priv_params(priv);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
priv->params.lro_en = true;
- break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- }
-
- mlx5_core_info(mdev,
- "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(priv->params.log_rq_size),
- BIT(priv->params.mpwqe_log_stride_sz),
- priv->params.rx_cqe_compress_admin);
-
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
@@ -3059,19 +3456,16 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
- priv->params.lro_wqe_sz =
- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+ /* Extra room needed for build_skb */
+ MLX5_RX_HEADROOM -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Initialize pflags */
MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
-
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
#endif
@@ -3211,37 +3605,37 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *mkc;
- int inlen = sizeof(*in);
u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- mkc = &in->seg;
- mkc->status = MLX5_MKEY_STATUS_FREE;
- mkc->flags = MLX5_PERM_UMR_EN |
- MLX5_PERM_LOCAL_READ |
- MLX5_PERM_LOCAL_WRITE |
- MLX5_ACCESS_MODE_MTT;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
- mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
- mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
- mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
- mkc->log2_page_size = PAGE_SHIFT;
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
- err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
- NULL, NULL);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
- kvfree(in);
+ err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
+ kvfree(in);
return err;
}
@@ -3360,6 +3754,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
struct mlx5_eswitch_rep rep;
+ mlx5_lag_add(mdev, netdev);
+
if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
udp_tunnel_get_rx_info(netdev);
@@ -3373,9 +3769,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
- rep.vport = 0;
+ rep.vport = FDB_UPLINK_VPORT;
rep.priv_data = priv;
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
}
@@ -3383,6 +3779,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
queue_work(priv->wq, &priv->set_rx_mode_work);
mlx5e_disable_async_events(priv);
+ mlx5_lag_remove(priv->mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3399,13 +3796,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.max_tc = MLX5E_MAX_NUM_TC,
};
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile, void *ppriv)
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
+ int nch = profile->max_nch(mdev);
struct net_device *netdev;
struct mlx5e_priv *priv;
- int nch = profile->max_nch(mdev);
- int err;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
@@ -3423,12 +3820,31 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_netdev;
+ goto err_cleanup_nic;
+
+ return netdev;
+
+err_cleanup_nic:
+ profile->cleanup(priv);
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ const struct mlx5e_profile *profile;
+ struct mlx5e_priv *priv;
+ int err;
+
+ priv = netdev_priv(netdev);
+ profile = priv->profile;
+ clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
err = mlx5e_create_umr_mkey(priv);
if (err) {
mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
- goto err_destroy_wq;
+ goto out;
}
err = profile->init_tx(priv);
@@ -3451,20 +3867,16 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
mlx5e_set_dev_port_mtu(netdev);
- err = register_netdev(netdev);
- if (err) {
- mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_dealloc_q_counters;
- }
-
if (profile->enable)
profile->enable(priv);
- return priv;
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
-err_dealloc_q_counters:
- mlx5e_destroy_q_counter(priv);
- profile->cleanup_rx(priv);
+ return 0;
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
@@ -3475,13 +3887,8 @@ err_cleanup_tx:
err_destroy_umr_mkey:
mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
-
-err_free_netdev:
- free_netdev(netdev);
-
- return NULL;
+out:
+ return err;
}
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
@@ -3503,20 +3910,84 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
rep.unload = mlx5e_vport_rep_unload;
rep.vport = vport;
ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep);
}
}
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ const struct mlx5e_profile *profile = priv->profile;
+
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (profile->disable)
+ profile->disable(priv);
+
+ flush_workqueue(priv->wq);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_close(netdev);
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
+ mlx5e_destroy_q_counter(priv);
+ profile->cleanup_rx(priv);
+ mlx5e_close_drop_rq(priv);
+ profile->cleanup_tx(priv);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
+ cancel_delayed_work_sync(&priv->update_stats_work);
+}
+
+/* The scope of mlx5e_attach and mlx5e_detach is limited to creating/destroying
+ * the hardware contexts and connecting them to the current netdev.
+ */
+static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+ int err;
+
+ if (netif_device_present(netdev))
+ return 0;
+
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ return err;
+
+ err = mlx5e_attach_netdev(mdev, netdev);
+ if (err) {
+ mlx5e_destroy_mdev_resources(mdev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_destroy_mdev_resources(mdev);
+}
+
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
void *ppriv = NULL;
- void *ret;
-
- if (mlx5e_check_required_hca_cap(mdev))
- return NULL;
+ void *priv;
+ int vport;
+ int err;
+ struct net_device *netdev;
- if (mlx5e_create_mdev_resources(mdev))
+ err = mlx5e_check_required_hca_cap(mdev);
+ if (err)
return NULL;
mlx5e_register_vport_rep(mdev);
@@ -3524,12 +3995,39 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
if (MLX5_CAP_GEN(mdev, vport_group_manager))
ppriv = &esw->offloads.vport_reps[0];
- ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
- if (!ret) {
- mlx5e_destroy_mdev_resources(mdev);
- return NULL;
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
+ goto err_unregister_reps;
}
- return ret;
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5e_attach(mdev, priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_detach;
+ }
+
+ return priv;
+
+err_detach:
+ mlx5e_detach(mdev, priv);
+
+err_destroy_netdev:
+ mlx5e_destroy_netdev(mdev, priv);
+
+err_unregister_reps:
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ return NULL;
}
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
@@ -3537,30 +4035,11 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- if (profile->disable)
- profile->disable(priv);
-
- flush_workqueue(priv->wq);
- if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
- netif_device_detach(netdev);
- mlx5e_close(netdev);
- } else {
- unregister_netdev(netdev);
- }
-
- mlx5e_destroy_q_counter(priv);
- profile->cleanup_rx(priv);
- mlx5e_close_drop_rq(priv);
- profile->cleanup_tx(priv);
- mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
- cancel_delayed_work_sync(&priv->update_stats_work);
+ unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
-
- if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
- free_netdev(netdev);
+ free_netdev(netdev);
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
@@ -3570,12 +4049,11 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
int vport;
- mlx5e_destroy_netdev(mdev, priv);
-
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5e_detach(mdev, vpriv);
+ mlx5e_destroy_netdev(mdev, priv);
}
static void *mlx5e_get_netdev(void *vpriv)
@@ -3588,6 +4066,8 @@ static void *mlx5e_get_netdev(void *vpriv)
static struct mlx5_interface mlx5e_interface = {
.add = mlx5e_add,
.remove = mlx5e_remove,
+ .attach = mlx5e_attach,
+ .detach = mlx5e_detach,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 134de4a11f1d..3c97da103d30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -413,19 +413,50 @@ static struct mlx5e_profile mlx5e_rep_profile = {
int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
- rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
- if (!rep->priv_data) {
- pr_warn("Failed to create representor for vport %d\n",
+ struct net_device *netdev;
+ int err;
+
+ netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ if (!netdev) {
+ pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
return -EINVAL;
}
+
+ rep->priv_data = netdev_priv(netdev);
+
+ err = mlx5e_attach_netdev(esw->dev, netdev);
+ if (err) {
+ pr_warn("Failed to attach representor netdev for vport %d\n",
+ rep->vport);
+ goto err_destroy_netdev;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ pr_warn("Failed to register representor netdev for vport %d\n",
+ rep->vport);
+ goto err_detach_netdev;
+ }
+
return 0;
+
+err_detach_netdev:
+ mlx5e_detach_netdev(esw->dev, netdev);
+
+err_destroy_netdev:
+ mlx5e_destroy_netdev(esw->dev, rep->priv_data);
+
+ return err;
}
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
struct mlx5e_priv *priv = rep->priv_data;
+ struct net_device *netdev = priv->netdev;
+
+ mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e7c969df3dad..c6de6fba5843 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
+#include "eswitch.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -179,96 +180,111 @@ unlock:
mutex_unlock(&priv->state_lock);
}
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
-{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
- skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
- if (unlikely(!skb))
- return -ENOMEM;
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
+ u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
- dma_addr = dma_map_single(rq->pdev,
- /* hw start padding */
- skb->data,
- /* hw end padding */
- rq->wqe_sz,
- DMA_FROM_DEVICE);
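+ /* The cache is a ring buffer over page_cache[]; the index
+ * arithmetic assumes MLX5E_CACHE_SIZE is a power of two, and one
+ * slot is kept empty to tell a full ring from an empty one.
+ */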
+ if (tail_next == cache->head) {
+ rq->stats.cache_full++;
+ return false;
+ }
- if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
- goto err_free_skb;
+ cache->page_cache[cache->tail] = *dma_info;
+ cache->tail = tail_next;
+ return true;
+}
- *((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr);
- wqe->data.lkey = rq->mkey_be;
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
- rq->skb[ix] = skb;
+ if (unlikely(cache->head == cache->tail)) {
+ rq->stats.cache_empty++;
+ return false;
+ }
- return 0;
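+ /* A reference count above one means the page at the head of the
+ * cache is still in use by the network stack and cannot be
+ * reused yet.
+ */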
+ if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ rq->stats.cache_busy++;
+ return false;
+ }
-err_free_skb:
- dev_kfree_skb(skb);
+ *dma_info = cache->page_cache[cache->head];
+ cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
+ rq->stats.cache_reuse++;
- return -ENOMEM;
+ dma_sync_single_for_device(rq->pdev, dma_info->addr,
+ RQ_PAGE_SIZE(rq),
+ DMA_FROM_DEVICE);
+ return true;
}
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
{
- struct sk_buff *skb = rq->skb[ix];
+ struct page *page;
+
+ if (mlx5e_rx_cache_get(rq, dma_info))
+ return 0;
+
+ page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!page))
+ return -ENOMEM;
- if (skb) {
- rq->skb[ix] = NULL;
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ dma_info->page = page;
+ dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+ put_page(page);
+ return -ENOMEM;
}
+
+ return 0;
}
-static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle)
{
- return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+ if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
+ return;
+
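+ /* The cache is full or recycling was not requested; unmap the
+ * page and hand it back to the page allocator.
+ */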
+ dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+ rq->buff.map_dir);
+ put_page(dma_info->page);
}
-static inline void
-mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len)
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
- dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
- len, DMA_FROM_DEVICE);
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
+ return -ENOMEM;
+
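+ /* Post the buffer past the headroom so build_skb() and XDP have
+ * working space in front of the packet data.
+ */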
+ wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+ return 0;
}
-static inline void
-mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len)
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- /* No dma pre sync for fragmented MPWQE */
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ mlx5e_page_release(rq, di, true);
}
-static inline void
-mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
- unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
-
- wi->skbs_frags[page_idx]++;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- &wi->dma_info.page[page_idx], frag_offset,
- len, truesize);
+ return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
}
-static inline void
-mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
{
unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
@@ -282,24 +298,11 @@ mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
}
static inline void
-mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
-{
- struct page *page = &wi->dma_info.page[page_idx];
-
- skb_copy_to_linear_data(skb, page_address(page) + offset,
- ALIGN(headlen, sizeof(long)));
-}
-
-static inline void
-mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
+mlx5e_copy_skb_header_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
{
u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
@@ -324,46 +327,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
-static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
-{
- return rq->mpwqe_mtt_offset +
- wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
-static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
- struct mlx5e_sq *sq,
- struct mlx5e_umr_wqe *wqe,
- u16 ix)
-{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- struct mlx5_wqe_data_seg *dseg = &wqe->data;
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
-
- memset(wqe, 0, sizeof(*wqe));
- cseg->opmod_idx_opcode =
- cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
- ds_cnt);
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- cseg->imm = rq->umr_mkey_be;
-
- ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
- ucseg->klm_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
- ucseg->bsf_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
- ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
- dseg->lkey = sq->mkey_be;
- dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
-}
-
-static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_sq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
@@ -372,135 +338,74 @@ static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
/* fill sq edge with nops to avoid wqe wrap around */
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- mlx5e_build_umr_wqe(rq, sq, wqe, ix);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
- sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
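+ /* Copy the WQE pre-built at RQ creation time and patch in the
+ * current producer counter, which changes on every post.
+ */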
+ memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+ wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
sq->pc += num_wqebbs;
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the mtt array, we allocate
- * a little more.
- */
- return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
- MLX5_UMR_MTT_ALIGNMENT);
-}
-
-static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi,
- int i)
-{
- struct page *page;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return -ENOMEM;
-
- wi->umr.dma_info[i].page = page;
- wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
- put_page(page);
- return -ENOMEM;
- }
- wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
-
- return 0;
-}
-
-static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe,
- u16 ix)
+static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
+ int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+ int err;
int i;
- wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
- MLX5_MPWRQ_PAGES_PER_WQE,
- GFP_ATOMIC);
- if (unlikely(!wi->umr.dma_info))
- goto err_out;
-
- /* We allocate more than mtt_sz as we will align the pointer */
- wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
- GFP_ATOMIC);
- if (unlikely(!wi->umr.mtt_no_align))
- goto err_free_umr;
-
- wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
- wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
- PCI_DMA_TODEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
- goto err_free_mtt;
-
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+ err = mlx5e_page_alloc_mapped(rq, dma_info);
+ if (unlikely(err))
goto err_unmap;
- page_ref_add(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq));
+ wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+ page_ref_add(dma_info->page, pg_strides);
wi->skbs_frags[i] = 0;
}
wi->consumed_strides = 0;
- wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
- wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
- wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
- wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
- wqe->data.lkey = rq->umr_mkey_be;
wqe->data.addr = cpu_to_be64(dma_offset);
return 0;
err_unmap:
while (--i >= 0) {
- dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- page_ref_sub(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq));
- put_page(wi->umr.dma_info[i].page);
- }
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
-
-err_free_mtt:
- kfree(wi->umr.mtt_no_align);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-err_free_umr:
- kfree(wi->umr.dma_info);
+ page_ref_sub(dma_info->page, pg_strides);
+ mlx5e_page_release(rq, dma_info, true);
+ }
-err_out:
- return -ENOMEM;
+ return err;
}
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
{
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
int i;
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- page_ref_sub(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
- put_page(wi->umr.dma_info[i].page);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+ page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+ mlx5e_page_release(rq, dma_info, true);
}
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
- kfree(wi->umr.mtt_no_align);
- kfree(wi->umr.dma_info);
}
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
@@ -508,12 +413,11 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
- mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
- rq->stats.mpwqe_frag++;
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
@@ -521,84 +425,23 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
mlx5_wq_ll_update_db_record(wq);
}
-static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe,
- u16 ix)
-{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- gfp_t gfp_mask;
- int i;
-
- gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
- wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
- MLX5_MPWRQ_WQE_PAGE_ORDER);
- if (unlikely(!wi->dma_info.page))
- return -ENOMEM;
-
- wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
- rq->wqe_sz, PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
- put_page(wi->dma_info.page);
- return -ENOMEM;
- }
-
- /* We split the high-order page into order-0 ones and manage their
- * reference counter to minimize the memory held by small skb fragments
- */
- split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- page_ref_add(&wi->dma_info.page[i],
- mlx5e_mpwqe_strides_per_page(rq));
- wi->skbs_frags[i] = 0;
- }
-
- wi->consumed_strides = 0;
- wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
- wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
- wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
- wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
- wqe->data.lkey = rq->mkey_be;
- wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
-
- return 0;
-}
-
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi)
-{
- int i;
-
- dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
- PCI_DMA_FROMDEVICE);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- page_ref_sub(&wi->dma_info.page[i],
- mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
- put_page(&wi->dma_info.page[i]);
- }
-}
-
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
int err;
- err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
- if (unlikely(err)) {
- err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
- if (unlikely(err))
- return err;
- set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
- mlx5e_post_umr_wqe(rq, ix);
- return -EBUSY;
- }
-
- return 0;
+ err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
+ if (unlikely(err))
+ return err;
+ set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5e_post_umr_wqe(rq, ix);
+ return -EBUSY;
}
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- wi->free_wqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi);
}
#define RQ_CANNOT_POST(rq) \
@@ -617,9 +460,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
int err;
err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (err == -EBUSY)
+ return true;
if (unlikely(err)) {
- if (err != -EBUSY)
- rq->stats.buff_alloc_err++;
+ rq->stats.buff_alloc_err++;
break;
}
@@ -786,40 +630,207 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
rq->stats.packets++;
rq->stats.bytes += cqe_bcnt;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
- napi_gro_receive(rq->cq.napi, skb);
+}
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ unsigned int data_offset,
+ int len)
+{
+ struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
+ unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
+ void *data = page_address(di->page) + data_offset;
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
+ if (sq->db.xdp.doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->db.xdp.doorbell = false;
+ }
+ rq->stats.xdp_tx_full++;
+ mlx5e_page_release(rq, di, true);
+ return;
+ }
+
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
+ PCI_DMA_TODEVICE);
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* copy the first MLX5E_XDP_MIN_INLINE bytes of the packet into the WQE */
+ memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+
+ sq->db.xdp.di[pi] = *di;
+ wi->opcode = MLX5_OPCODE_SEND;
+ wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
+ sq->pc += MLX5E_XDP_TX_WQEBBS;
+
+ sq->db.xdp.doorbell = true;
+ rq->stats.xdp_tx++;
+}
+
+/* Returns true if the packet was consumed by XDP. */
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ const struct bpf_prog *prog,
+ struct mlx5e_dma_info *di,
+ void *data, u16 len)
+{
+ struct xdp_buff xdp;
+ u32 act;
+
+ if (!prog)
+ return false;
+
+ xdp.data = data;
+ xdp.data_end = xdp.data + len;
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
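+ /* fall through */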
+ case XDP_ABORTED:
+ case XDP_DROP:
+ rq->stats.xdp_drop++;
+ mlx5e_page_release(rq, di, true);
+ return true;
+ }
+}
+
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ u16 wqe_counter, u32 cqe_bcnt)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
+ struct mlx5e_dma_info *di;
+ struct sk_buff *skb;
+ void *va, *data;
+
+ di = &rq->dma_info[wqe_counter];
+ va = page_address(di->page);
+ data = va + MLX5_RX_HEADROOM;
+
+ dma_sync_single_range_for_cpu(rq->pdev,
+ di->addr,
+ MLX5_RX_HEADROOM,
+ rq->buff.wqe_sz,
+ DMA_FROM_DEVICE);
+ prefetch(data);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+ return NULL; /* page/packet was consumed by XDP */
+
+ skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ /* Queue the page up for recycling: take an extra reference for
+ * the skb that now owns the buffer, then hand the DMA mapping
+ * back to the page cache.
+ */
+ page_ref_inc(di->page);
+ mlx5e_page_release(rq, di, true);
+
+ skb_reserve(skb, MLX5_RX_HEADROOM);
+ skb_put(skb, cqe_bcnt);
+
+ return skb;
}
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5e_rx_wqe *wqe;
- struct sk_buff *skb;
__be16 wqe_counter_be;
+ struct sk_buff *skb;
u16 wqe_counter;
u32 cqe_bcnt;
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
-
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
goto wq_ll_pop;
- }
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb_put(skb, cqe_bcnt);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
+ goto wq_ll_pop;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (rep->vlan && skb_vlan_tag_present(skb))
+ skb_vlan_pop(skb);
+
+ napi_gro_receive(rq->cq.napi, skb);
+
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -831,7 +842,6 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
@@ -845,21 +855,20 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
page_idx++;
frag_offset -= PAGE_SIZE;
}
- wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
while (byte_cnt) {
u32 pg_consumed_bytes =
min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
- pg_consumed_bytes);
+ mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+ pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
page_idx++;
}
/* copy header */
- wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
- headlen);
+ mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+ head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -869,7 +878,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
struct sk_buff *skb;
u16 cqe_bcnt;
@@ -899,18 +908,20 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
return;
- wi->free_wqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi);
mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
@@ -937,6 +948,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
+ if (xdp_sq->db.xdp.doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdp_sq);
+ xdp_sq->db.xdp.doorbell = false;
+ }
+
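Note the doorbell batching introduced here: XDP_TX descriptors are queued with db.xdp.doorbell set, and the doorbell is rung once per poll instead of once per packet. A self-contained sketch of the pattern, with made-up names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sketch: queue descriptors cheaply, notify hardware once. */
struct sq { int pending; bool doorbell; };

static void sq_enqueue(struct sq *sq) { sq->pending++; sq->doorbell = true; }
static void sq_ring(struct sq *sq)
{
	printf("doorbell: %d descriptors posted\n", sq->pending);
	sq->pending = 0;
}

static void poll_batch(struct sq *sq, int pkts)
{
	for (int i = 0; i < pkts; i++)
		sq_enqueue(sq);	/* per-packet work stays MMIO-free */
	if (sq->doorbell) {	/* one MMIO write per poll, as in the diff */
		sq_ring(sq);
		sq->doorbell = false;
	}
}

int main(void) { struct sq sq = {0}; poll_batch(&sq, 64); return 0; }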
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 499487ce3b53..57452fdc5154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -65,6 +65,9 @@ struct mlx5e_sw_stats {
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_full;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
@@ -73,10 +76,13 @@ struct mlx5e_sw_stats {
u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
- u64 rx_mpwqe_frag;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
+ u64 rx_cache_reuse;
+ u64 rx_cache_full;
+ u64 rx_cache_empty;
+ u64 rx_cache_busy;
/* Special handling counters */
u64 link_down_events_phy;
@@ -97,6 +103,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -105,10 +114,13 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
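The stat tables above pair each counter name with its byte offset inside the stats struct, so one generic loop can dump them all. A hedged userspace analogue of the MLX5E_DECLARE_STAT() idea, assuming offsetof()-based descriptors:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical analogue of the descriptor table: name plus offset,
 * resolved with offsetof() so the dump loop needs no per-field code. */
struct sw_stats { uint64_t rx_xdp_drop, rx_xdp_tx, rx_cache_reuse; };

struct counter_desc { const char *name; size_t offset; };

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc desc[] = {
	DECLARE_STAT(struct sw_stats, rx_xdp_drop),
	DECLARE_STAT(struct sw_stats, rx_xdp_tx),
	DECLARE_STAT(struct sw_stats, rx_cache_reuse),
};

int main(void)
{
	struct sw_stats s = { .rx_xdp_drop = 3, .rx_xdp_tx = 7 };

	for (size_t i = 0; i < sizeof(desc) / sizeof(desc[0]); i++)
		printf("%-16s %llu\n", desc[i].name,
		       (unsigned long long)*(uint64_t *)((char *)&s + desc[i].offset));
	return 0;
}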
@@ -272,12 +284,18 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_tx_full;
u64 wqe_err;
u64 mpwqe_filler;
- u64 mpwqe_frag;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
+ u64 cache_reuse;
+ u64 cache_full;
+ u64 cache_empty;
+ u64 cache_busy;
};
static const struct counter_desc rq_stats_desc[] = {
@@ -286,14 +304,20 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
};
struct mlx5e_sq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 22cfc4ac1837..ce8c54d18906 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -47,6 +48,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
struct mlx5_flow_rule *rule;
+ struct mlx5_esw_flow_attr *attr;
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
@@ -114,27 +116,30 @@ err_create_ft:
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- u32 action, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- u32 src_vport;
+ int err;
- if (rep->vport) /* set source vport for the flow */
- src_vport = rep->vport;
- else
- src_vport = FDB_UPLINK_VPORT;
+ err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (err)
+ return ERR_PTR(err);
- return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_rule *rule)
+ struct mlx5_flow_rule *rule,
+ struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(rule);
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ mlx5_eswitch_del_vlan_action(esw, attr);
+
mlx5_del_flow_rule(rule);
mlx5_fc_destroy(priv->mdev, counter);
@@ -159,6 +164,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
@@ -222,6 +228,24 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
key->src);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+ }
+ }
+
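Each flower key is programmed twice, into headers_c (the mask) and headers_v (the value); a packet matches when its masked fields equal the masked value. A tiny illustration of that mask/value test (just the arithmetic, not the firmware semantics):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: classification with a mask/value pair, the same
 * headers_c/headers_v split the diff programs via MLX5_SET(). */
static bool fte_match(uint16_t pkt_vid, uint16_t mask, uint16_t value)
{
	return (pkt_vid & mask) == (value & mask);
}

int main(void)
{
	printf("%d\n", fte_match(100, 0xfff, 100));	/* 1: vid 100 matches */
	printf("%d\n", fte_match(101, 0xfff, 100));	/* 0: vid differs */
	printf("%d\n", fte_match(101, 0x000, 100));	/* 1: wildcard mask */
	return 0;
}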
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -361,7 +385,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *dest_vport)
+ struct mlx5_esw_flow_attr *attr)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -369,17 +393,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tc_no_actions(exts))
return -EINVAL;
- *action = 0;
+ memset(attr, 0, sizeof(*attr));
+ attr->in_rep = priv->ppriv;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (*action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
- *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -387,7 +408,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- struct mlx5_eswitch_rep *out_rep;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -397,13 +417,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
out_priv = netdev_priv(out_dev);
- out_rep = out_priv->ppriv;
- if (out_rep->vport == 0)
- *dest_vport = FDB_UPLINK_VPORT;
- else
- *dest_vport = out_rep->vport;
- *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->out_rep = out_priv->ppriv;
+ continue;
+ }
+
+ if (is_tcf_vlan(a)) {
+ if (tcf_vlan_action(a) == VLAN_F_POP) {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ attr->vlan = tcf_vlan_push_vid(a);
+ }
continue;
}
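Replacing the old single-action check with |= on attr->action lets one rule carry several actions, e.g. a VLAN push combined with a forward. A trivial sketch of the accumulated bitmask, with placeholder flag values:

#include <stdio.h>

/* Hypothetical flag values; the real ones live in the mlx5 headers. */
#define ACT_DROP   0x01
#define ACT_COUNT  0x02
#define ACT_FWD    0x04
#define ACT_VPUSH  0x08

int main(void)
{
	int action = 0;

	action |= ACT_VPUSH;	/* from is_tcf_vlan() push */
	action |= ACT_FWD;	/* from is_tcf_mirred() redirect */

	if ((action & (ACT_VPUSH | ACT_FWD)) == (ACT_VPUSH | ACT_FWD))
		printf("push vlan, then forward: one rule, two actions\n");
	return 0;
}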
@@ -417,18 +446,29 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
- u32 flow_tag, action, dest_vport = 0;
+ bool fdb_flow = false;
+ u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
+ struct mlx5_esw_flow_attr *old_attr = NULL;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ fdb_flow = true;
+
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
- if (flow)
+ if (flow) {
old = flow->rule;
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ old_attr = flow->attr;
+ } else {
+ if (fdb_flow)
+ flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
+ GFP_KERNEL);
+ else
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ }
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
@@ -442,11 +482,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (err < 0)
goto err_free;
- if (esw && esw->mode == SRIOV_OFFLOADS) {
- err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+ if (fdb_flow) {
+ flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
+ err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
} else {
err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
if (err < 0)
@@ -465,7 +506,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto err_del_rule;
if (old)
- mlx5e_tc_del_flow(priv, old);
+ mlx5e_tc_del_flow(priv, old, old_attr);
goto out;
@@ -493,7 +534,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
@@ -550,7 +591,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = arg;
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index eb0e72537f10..70a717382357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -52,7 +52,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
- sq->skb[pi] = NULL;
sq->pc++;
sq->stats.nop++;
@@ -82,15 +81,17 @@ static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
+ u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+
+ sq->db.txq.dma_fifo[i].addr = addr;
+ sq->db.txq.dma_fifo[i].size = size;
+ sq->db.txq.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
- return &sq->dma_fifo[i & sq->dma_fifo_mask];
+ return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
@@ -221,7 +222,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
+ struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
@@ -341,7 +342,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->skb[pi] = skb;
+ sq->db.txq.skb[pi] = skb;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += wi->num_wqebbs;
@@ -368,8 +369,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
/* fill sq edge with nops to avoid wqe wrap around */
- while ((sq->pc & wq->sz_m1) > sq->edge)
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->db.txq.skb[pi] = NULL;
mlx5e_send_nop(sq, false);
+ }
if (bf)
sq->bf_budget--;
@@ -442,8 +445,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -492,7 +495,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -501,8 +504,8 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
@@ -520,3 +523,37 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
sq->cc += wi->num_wqebbs;
}
}
+
+static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (wi->opcode == MLX5_OPCODE_NOP) {
+ sq->cc++;
+ continue;
+ }
+
+ sq->cc += wi->num_wqebbs;
+
+ mlx5e_page_release(&sq->channel->rq, di, false);
+ }
+}
+
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_txq_sq_descs(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_xdp_sq_descs(sq);
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bf33bb69210..5703f19a6a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -72,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
do {
u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
- struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+ struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
mlx5_cqwq_pop(&cq->wq);
sqcc += icowi->num_wqebbs;
@@ -87,7 +87,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
case MLX5_OPCODE_NOP:
break;
case MLX5_OPCODE_UMR:
- mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+ mlx5e_post_rx_mpwqe(&sq->channel->rq);
break;
default:
WARN_ONCE(true,
@@ -105,6 +105,66 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
}
+static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return false;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+ sqcc++;
+ continue;
+ }
+
+ sqcc += wi->num_wqebbs;
+ /* Recycle RX page */
+ mlx5e_page_release(&sq->channel->rq, di, true);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -121,6 +181,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+ if (c->xdp)
+ busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= mlx5e_post_rx_wqes(&c->rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 0e30602ef76d..aaca09002ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -86,23 +86,12 @@ struct cre_des_eq {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
- struct mlx5_destroy_eq_mbox_in in;
- struct mlx5_destroy_eq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
- in.eqn = eqn;
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (!err)
- goto ex;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
-ex:
- return err;
+ MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
+ MLX5_SET(destroy_eq_in, in, eq_number, eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
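This hunk is part of a wider conversion from per-command mailbox structs to flat, zero-initialized u32 arrays sized with MLX5_ST_SZ_DW() and written with MLX5_SET(). An illustrative analogue of that set-field-in-a-buffer approach (the helper and the opcode value are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical analogue: commands are flat buffers; fields are written
 * by (dword, shift, width) instead of by struct member, so one helper
 * serves every command layout. */
static void set_field32(uint32_t *buf, unsigned dw, unsigned shift,
			unsigned width, uint32_t val)
{
	uint32_t mask = (width == 32) ? 0xffffffffu
				      : ((1u << width) - 1) << shift;

	buf[dw] = (buf[dw] & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t in[4] = {0};			/* like MLX5_ST_SZ_DW(...) = {0} */

	set_field32(in, 0, 16, 16, 0x0302);	/* opcode, e.g. a destroy-EQ-style value */
	set_field32(in, 1, 0, 8, 5);		/* eq_number */
	printf("in[0]=%#x in[1]=%#x\n", (unsigned)in[0], (unsigned)in[1]);
	return 0;
}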
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
@@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
+ u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
- struct mlx5_create_eq_mbox_in *in;
- struct mlx5_create_eq_mbox_out out;
- int err;
+ __be64 *pas;
+ void *eqc;
int inlen;
+ u32 *in;
+ int err;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
@@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
init_eq_buf(eq);
- inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
+ inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
+ MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
- memset(&out, 0, sizeof(out));
- mlx5_fill_page_array(&eq->buf, in->pas);
+ pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
+ mlx5_fill_page_array(&eq->buf, pas);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
- in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
- in->ctx.intr = vecidx;
- in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->events_mask = cpu_to_be64(mask);
+ MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+ MLX5_SET64(create_eq_in, in, event_bitmask, mask);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- goto err_in;
+ eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
+ MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+ MLX5_SET(eqc, eqc, uar_page, uar->index);
+ MLX5_SET(eqc, eqc, intr, vecidx);
+ MLX5_SET(eqc, eqc, log_page_size,
+ eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (err)
goto err_in;
- }
snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
- eq->eqn = out.eq_number;
+ eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
@@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_eq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, outlen);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
- in.eqn = eq->eqn;
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
- if (out->hdr.status)
- err = mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b247949df135..abbf2c369923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -81,19 +81,12 @@ enum {
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
- int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
- int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
void *nic_vport_ctx;
- int err;
-
- memset(out, 0, sizeof(out));
- memset(in, 0, sizeof(in));
MLX5_SET(modify_nic_vport_context_in, in,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
@@ -116,113 +109,44 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- goto ex;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto ex;
- return 0;
-ex:
- return err;
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
/* E-Switch vport context HW commands */
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
-
- memset(in, 0, sizeof(in));
-
- MLX5_SET(query_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
-
- MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
-}
-
-static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 *vlan, u8 *qos)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
- int err;
- bool cvlan_strip;
- bool cvlan_insert;
-
- memset(out, 0, sizeof(out));
-
- *vlan = 0;
- *qos = 0;
-
- if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
- !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
- return -ENOTSUPP;
-
- err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
- if (err)
- goto out;
-
- cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.vport_cvlan_strip);
-
- cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.vport_cvlan_insert);
-
- if (cvlan_strip || cvlan_insert) {
- *vlan = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.cvlan_id);
- *qos = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.cvlan_pcp);
- }
-
- esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
- vport, *vlan, *qos);
-out:
- return err;
-}
-
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
void *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
-
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
+ MLX5_SET(modify_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
-
- MLX5_SET(modify_esw_vport_context_in, in, opcode,
- MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
-
- return mlx5_cmd_exec_check_status(dev, in, inlen,
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 vlan, u8 qos, bool set)
+ u16 vlan, u8 qos, u8 set_flags)
{
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
return -ENOTSUPP;
- esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
- vport, vlan, qos, set);
+ esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
+ vport, vlan, qos, set_flags);
- if (set) {
+ if (set_flags & SET_VLAN_STRIP)
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
+
+ if (set_flags & SET_VLAN_INSERT) {
/* insert only if no vlan in packet */
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_insert, 1);
+
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_pcp, qos);
MLX5_SET(modify_esw_vport_context_in, in,
@@ -241,13 +165,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
u8 *mac, u8 vlan_valid, u16 vlan)
{
- u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
- u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
u8 *in_mac_addr;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(set_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
MLX5_SET(set_l2_table_entry_in, in, table_index, index);
@@ -257,23 +178,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
- u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
- u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
MLX5_SET(delete_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
@@ -340,7 +256,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
- pr_warn("FDB: Failed to alloc match parameters\n");
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
return NULL;
}
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -374,8 +290,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule)) {
- pr_warn(
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
+ esw_warn(esw->dev,
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
@@ -955,7 +871,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
vport_num, promisc_all, promisc_mc);
- if (!vport->trusted || !vport->enabled) {
+ if (!vport->info.trusted || !vport->enabled) {
promisc_uc = 0;
promisc_mc = 0;
promisc_all = 0;
@@ -1291,30 +1207,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_spec *spec;
- u8 smac[ETH_ALEN];
int err = 0;
u8 *smac_v;
- if (vport->spoofchk) {
- err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
- if (err) {
- esw_warn(esw->dev,
- "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
- vport->vport, err);
- return err;
- }
+ if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
- if (!is_valid_ether_addr(smac)) {
- mlx5_core_warn(esw->dev,
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
- vport->vport);
- return -EPERM;
- }
}
esw_vport_cleanup_ingress_rules(esw, vport);
- if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
esw_vport_disable_ingress_acl(esw, vport);
return 0;
}
@@ -1323,7 +1229,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->vlan, vport->qos);
+ vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
@@ -1333,16 +1239,16 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- if (vport->vlan || vport->qos)
+ if (vport->info.vlan || vport->info.qos)
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- if (vport->spoofchk) {
+ if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
outer_headers.smac_47_16);
- ether_addr_copy(smac_v, smac);
+ ether_addr_copy(smac_v, vport->info.mac);
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -1352,8 +1258,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
- pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
vport->ingress.allow_rule = NULL;
goto out;
}
@@ -1365,8 +1272,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
- pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
vport->ingress.drop_rule = NULL;
goto out;
}
@@ -1386,7 +1294,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw_vport_cleanup_egress_rules(esw, vport);
- if (!vport->vlan && !vport->qos) {
+ if (!vport->info.vlan && !vport->info.qos) {
esw_vport_disable_egress_acl(esw, vport);
return 0;
}
@@ -1395,7 +1303,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->vlan, vport->qos);
+ vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
@@ -1409,7 +1317,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan =
@@ -1418,8 +1326,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
- pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
vport->egress.allowed_vlan = NULL;
goto out;
}
@@ -1432,8 +1341,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
- pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
@@ -1441,6 +1351,41 @@ out:
return err;
}
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
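node_guid_gen_from_mac() produces an EUI-64-style GUID by splicing ff:fe into the middle of the MAC (without the usual universal/local bit flip). A standalone check using the same byte placement as the diff:

#include <stdint.h>
#include <stdio.h>

/* Same byte placement as node_guid_gen_from_mac() above; on a
 * little-endian host byte [7] is the most significant byte of the u64. */
static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[6])
{
	((uint8_t *)node_guid)[7] = mac[0];
	((uint8_t *)node_guid)[6] = mac[1];
	((uint8_t *)node_guid)[5] = mac[2];
	((uint8_t *)node_guid)[4] = 0xff;
	((uint8_t *)node_guid)[3] = 0xfe;
	((uint8_t *)node_guid)[2] = mac[3];
	((uint8_t *)node_guid)[1] = mac[4];
	((uint8_t *)node_guid)[0] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t guid = 0;

	node_guid_gen_from_mac(&guid, mac);
	printf("%016llx\n", (unsigned long long)guid);
	/* prints 001122fffe334455 on little-endian hosts */
	return 0;
}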
+
+static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int vport_num = vport->vport;
+
+ if (!vport_num)
+ return;
+
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport_num,
+ vport->info.link_state);
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
+ (vport->info.vlan || vport->info.qos));
+
+ /* Only legacy mode needs ACLs */
+ if (esw->mode == SRIOV_LEGACY) {
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+}
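Because the admin settings now live in vport->info, esw_apply_vport_conf() can simply replay the cached configuration whenever a vport is enabled, which is also what made the query_esw_vport_cvlan() firmware query above removable. A minimal sketch of the cache-and-replay pattern, assuming hypothetical types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sketch: the admin state is the single source of truth;
 * hardware is reprogrammed from it on every enable. */
struct vport_info { int vlan; bool spoofchk; };
struct vport { bool enabled; struct vport_info info; };

static void hw_program(const struct vport *v)
{
	printf("hw: vlan=%d spoofchk=%d\n", v->info.vlan, v->info.spoofchk);
}

static void vport_enable(struct vport *v)
{
	hw_program(v);		/* replay cached config, no firmware query */
	v->enabled = true;
}

int main(void)
{
	struct vport v = { .info = { .vlan = 100, .spoofchk = true } };

	vport_enable(&v);	/* settings survive disable/enable cycles */
	return 0;
}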
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1451,23 +1396,17 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Only VFs need ACLs for VST and spoofchk filtering */
- if (vport_num && esw->mode == SRIOV_LEGACY) {
- esw_vport_ingress_config(esw, vport);
- esw_vport_egress_config(esw, vport);
- }
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO);
+ /* Restore old vport configuration */
+ esw_apply_vport_conf(esw, vport);
/* Sync with current vport context */
vport->enabled_events = enable_events;
vport->enabled = true;
/* only PF is trusted by default */
- vport->trusted = (vport_num) ? false : true;
+ if (!vport_num)
+ vport->info.trusted = true;
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1487,11 +1426,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
vport->enabled = false;
synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
/* Wait for current already scheduled events to complete */
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
@@ -1503,7 +1437,12 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+
if (vport_num && esw->mode == SRIOV_LEGACY) {
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport_num,
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1590,6 +1529,25 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
+{
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ /* VF Vports will be enabled when SRIOV is enabled */
+}
+
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
+{
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ esw_disable_vport(esw, 0);
+}
+
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
@@ -1657,6 +1615,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num;
+ vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
@@ -1667,8 +1626,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw;
- esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
- /* VF Vports will be enabled when SRIOV is enabled */
return 0;
abort:
if (esw->work_queue)
@@ -1687,7 +1644,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
return;
esw_info(esw->dev, "cleanup\n");
- esw_disable_vport(esw, 0);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
@@ -1720,18 +1676,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
-{
- ((u8 *)node_guid)[7] = mac[0];
- ((u8 *)node_guid)[6] = mac[1];
- ((u8 *)node_guid)[5] = mac[2];
- ((u8 *)node_guid)[4] = 0xff;
- ((u8 *)node_guid)[3] = 0xfe;
- ((u8 *)node_guid)[2] = mac[3];
- ((u8 *)node_guid)[1] = mac[4];
- ((u8 *)node_guid)[0] = mac[5];
-}
-
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
@@ -1744,13 +1688,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
mlx5_core_warn(esw->dev,
"MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
vport);
- return -EPERM;
+ err = -EPERM;
+ goto unlock;
}
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
@@ -1758,7 +1704,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mlx5_core_warn(esw->dev,
"Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
vport, err);
- return err;
+ goto unlock;
}
node_guid_gen_from_mac(&node_guid, mac);
@@ -1768,9 +1714,12 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
"Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
vport, err);
- mutex_lock(&esw->state_lock);
+ ether_addr_copy(evport->info.mac, mac);
+ evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
+
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -1778,22 +1727,38 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state)
{
+ struct mlx5_vport *evport;
+ int err = 0;
+
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- return mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport, link_state);
+ mutex_lock(&esw->state_lock);
+ evport = &esw->vports[vport];
+
+ err = mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport, link_state);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d link state, err = %d",
+ vport, err);
+ goto unlock;
+ }
+
+ evport->info.link_state = link_state;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
struct mlx5_vport *evport;
- u16 vlan;
- u8 qos;
if (!ESW_ALLOWED(esw))
return -EPERM;
@@ -1805,54 +1770,61 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
- mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
- ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport);
- query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
- ivi->vlan = vlan;
- ivi->qos = qos;
- ivi->spoofchk = evport->spoofchk;
+ mutex_lock(&esw->state_lock);
+ ether_addr_copy(ivi->mac, evport->info.mac);
+ ivi->linkstate = evport->info.link_state;
+ ivi->vlan = evport->info.vlan;
+ ivi->qos = evport->info.qos;
+ ivi->spoofchk = evport->info.spoofchk;
+ ivi->trusted = evport->info.trusted;
+ mutex_unlock(&esw->state_lock);
return 0;
}
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
- int vport, u16 vlan, u8 qos)
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags)
{
struct mlx5_vport *evport;
int err = 0;
- int set = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
return -EINVAL;
- if (vlan || qos)
- set = 1;
-
+ mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
- return err;
+ goto unlock;
- mutex_lock(&esw->state_lock);
- evport->vlan = vlan;
- evport->qos = qos;
+ evport->info.vlan = vlan;
+ evport->info.qos = qos;
if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
- goto out;
+ goto unlock;
err = esw_vport_egress_config(esw, evport);
}
-out:
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos)
+{
+ u8 set_flags = 0;
+
+ if (vlan || qos)
+ set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+ return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+}
+
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
@@ -1865,16 +1837,14 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- evport = &esw->vports[vport];
-
mutex_lock(&esw->state_lock);
- pschk = evport->spoofchk;
- evport->spoofchk = spoofchk;
- if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+ evport = &esw->vports[vport];
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
- if (err)
- evport->spoofchk = pschk;
- }
+ if (err)
+ evport->info.spoofchk = pschk;
mutex_unlock(&esw->state_lock);
return err;
@@ -1890,10 +1860,9 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- evport = &esw->vports[vport];
-
mutex_lock(&esw->state_lock);
- evport->trusted = setting;
+ evport = &esw->vports[vport];
+ evport->info.trusted = setting;
if (evport->enabled)
esw_vport_change_handle_locked(evport);
mutex_unlock(&esw->state_lock);
@@ -1906,7 +1875,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
int err = 0;
u32 *out;
@@ -1919,8 +1888,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if (!out)
return -ENOMEM;
- memset(in, 0, sizeof(in));
-
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a96140971d77..2e2938e08cda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -109,6 +109,16 @@ struct vport_egress {
struct mlx5_flow_rule *drop_rule;
};
+struct mlx5_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ u8 qos;
+ u64 node_guid;
+ int link_state;
+ bool spoofchk;
+ bool trusted;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -121,10 +131,8 @@ struct mlx5_vport {
struct vport_ingress ingress;
struct vport_egress egress;
- u16 vlan;
- u8 qos;
- bool spoofchk;
- bool trusted;
+ struct mlx5_vport_info info;
+
bool enabled;
u16 enabled_events;
};
@@ -149,6 +157,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_rule *miss_rule;
+ int vlan_push_pop_refcount;
} offloads;
};
};
@@ -170,11 +179,14 @@ struct mlx5_eswitch_rep {
void (*unload)(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep);
u16 vport;
- struct mlx5_flow_rule *vport_rx_rule;
+ u8 hw_id[ETH_ALEN];
void *priv_data;
+
+ struct mlx5_flow_rule *vport_rx_rule;
struct list_head vport_sqs_list;
+ u16 vlan;
+ u32 vlan_refcount;
bool valid;
- u8 hw_id[ETH_ALEN];
};
struct mlx5_esw_offload {
@@ -201,9 +213,14 @@ struct mlx5_eswitch {
int mode;
};
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw);
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
@@ -224,14 +241,32 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
struct mlx5_flow_spec;
+struct mlx5_esw_flow_attr;
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport);
+ struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_rule *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+enum {
+ SET_VLAN_STRIP = BIT(0),
+ SET_VLAN_INSERT = BIT(1)
+};
+
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+
+struct mlx5_esw_flow_attr {
+ struct mlx5_eswitch_rep *in_rep;
+ struct mlx5_eswitch_rep *out_rep;
+
+ int action;
+ u16 vlan;
+ bool vlan_handled;
+};
+
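Offloaded rules now travel with all their parameters in one mlx5_esw_flow_attr rather than loose (action, src_vport, dst_vport) arguments. A self-contained sketch of how a push-VLAN-and-forward rule might populate such a struct (stand-in types; the two action values mirror the diff):

#include <stdio.h>

/* Stand-in for the real struct above; in_rep/out_rep reduced to vport
 * numbers so the sketch is self-contained. */
#define ACTION_FWD_DEST   0x4	/* MLX5_FLOW_CONTEXT_ACTION_FWD_DEST */
#define ACTION_VLAN_PUSH  0x80	/* value from the diff */

struct esw_flow_attr {
	int in_vport, out_vport;
	int action;
	unsigned short vlan;
};

int main(void)
{
	/* VF 1 -> uplink, pushing vlan 100 on the way out */
	struct esw_flow_attr attr = {
		.in_vport  = 1,
		.out_vport = 0xffff,	/* FDB_UPLINK_VPORT-style marker */
		.action    = ACTION_FWD_DEST | ACTION_VLAN_PUSH,
		.vlan      = 100,
	};

	printf("action=%#x vlan=%u\n", attr.action, (unsigned)attr.vlan);
	return 0;
}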
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num);
@@ -241,9 +276,17 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport);
+ int vport_index);
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags);
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7de40e6b0c25..c55ad8d00c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -46,19 +46,22 @@ enum {
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
void *misc;
+ int action;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ action = attr->action;
+
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = dst_vport;
+ dest.vport_num = attr->out_rep->vport;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
@@ -69,7 +72,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -86,6 +89,186 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
}
+static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vf_vport, err = 0;
+
+ esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+ for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+ rep = &esw->offloads.vport_reps[vf_vport];
+ if (!rep->valid)
+ continue;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static struct mlx5_eswitch_rep *
+esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push)
+ vport = in_rep;
+ else if (pop)
+ vport = out_rep;
+ else
+ vport = in_rep;
+
+ return vport;
+}
+
+static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
+ bool push, bool pop, bool fwd)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep;
+
+ if ((push || pop) && !fwd)
+ goto out_notsupp;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push && in_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* vport has vlan push configured, can't offload VF --> wire rules without it */
+ if (!push && !pop && fwd)
+ if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* protects against (1) setting rules with different vlans to push and
+ * (2) setting rules without a vlan (attr->vlan == 0) alongside rules that push one (!= 0)
+ */
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+ goto out_notsupp;
+
+ return 0;
+
+out_notsupp:
+ return -ENOTSUPP;
+}
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ if (err)
+ return err;
+
+ attr->vlan_handled = false;
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+ vport->vlan_refcount++;
+ attr->vlan_handled = true;
+ }
+
+ return 0;
+ }
+
+ if (!push && !pop)
+ return 0;
+
+ if (!(offloads->vlan_push_pop_refcount)) {
+ /* it's the 1st vlan rule, apply global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+ offloads->vlan_push_pop_refcount++;
+
+ if (push) {
+ if (vport->vlan_refcount)
+ goto skip_set_push;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+ SET_VLAN_INSERT | SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ vport->vlan = attr->vlan;
+skip_set_push:
+ vport->vlan_refcount++;
+ }
+out:
+ if (!err)
+ attr->vlan_handled = true;
+ return err;
+}
+
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ if (!attr->vlan_handled)
+ return 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+ vport->vlan_refcount--;
+
+ return 0;
+ }
+
+ if (push) {
+ vport->vlan_refcount--;
+ if (vport->vlan_refcount)
+ goto skip_unset_push;
+
+ vport->vlan = 0;
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
+ 0, 0, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+
+skip_unset_push:
+ offloads->vlan_push_pop_refcount--;
+ if (offloads->vlan_push_pop_refcount)
+ return 0;
+
+ /* no more vlan rules, stop global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, 0);
+
+out:
+ return err;
+}
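The add/del pair above is symmetric refcounting: the per-rep vlan_refcount decides when the push config is (un)programmed, and the FDB-wide vlan_push_pop_refcount turns the global pop policy on at 0 -> 1 and off at 1 -> 0. A minimal model of that gating:

#include <stdio.h>

/* Hypothetical model: a global side effect gated by a refcount, applied
 * on 0 -> 1 and reverted on 1 -> 0, the same shape as
 * vlan_push_pop_refcount in the diff. */
static int refcount;

static void vlan_rule_add(void)
{
	if (!refcount)
		printf("first rule: enable global vlan pop\n");
	refcount++;
}

static void vlan_rule_del(void)
{
	if (!--refcount)
		printf("last rule gone: disable global vlan pop\n");
}

int main(void)
{
	vlan_rule_add();
	vlan_rule_add();	/* no-op for the global policy */
	vlan_rule_del();
	vlan_rule_del();	/* reverts the policy */
	return 0;
}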
+
static struct mlx5_flow_rule *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
@@ -144,16 +327,12 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
{
struct mlx5_flow_rule *flow_rule;
struct mlx5_esw_sq *esw_sq;
- int vport;
int err;
int i;
if (esw->mode != SRIOV_OFFLOADS)
return 0;
- vport = rep->vport == 0 ?
- FDB_UPLINK_VPORT : rep->vport;
-
for (i = 0; i < sqns_num; i++) {
esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
if (!esw_sq) {
@@ -163,7 +342,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
/* Add re-inject rule to the PF/representor sqs */
flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- vport,
+ rep->vport,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -620,27 +799,36 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+ int vport_index,
+ struct mlx5_eswitch_rep *__rep)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport_index];
+
+ memset(rep, 0, sizeof(*rep));
- memcpy(&offloads->vport_reps[rep->vport], rep,
- sizeof(struct mlx5_eswitch_rep));
+ rep->load = __rep->load;
+ rep->unload = __rep->unload;
+ rep->vport = __rep->vport;
+ rep->priv_data = __rep->priv_data;
+ ether_addr_copy(rep->hw_id, __rep->hw_id);
- INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
- offloads->vport_reps[rep->vport].valid = true;
+ INIT_LIST_HEAD(&rep->vport_sqs_list);
+ rep->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport)
+ int vport_index)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- rep = &offloads->vport_reps[vport];
+ rep = &offloads->vport_reps[vport_index];
- if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
rep->unload(esw, rep);
- offloads->vport_reps[vport].valid = false;
+ rep->valid = false;
}
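/*
 * Editor's note (illustrative sketch, not part of the commit): with the
 * reworked signature the caller owns the vport index and passes a template
 * rep whose fields are copied, rather than a rep that embeds its own index.
 * A consumer would register along these lines (callback names hypothetical):
 *
 *	struct mlx5_eswitch_rep rep;
 *
 *	rep.load = my_rep_load;
 *	rep.unload = my_rep_unload;
 *	rep.vport = vport;
 *	ether_addr_copy(rep.hw_id, mac);
 *	mlx5_eswitch_register_vport_rep(esw, vport, &rep);
 */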
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 287ade151ec8..113c32326333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -41,10 +41,8 @@
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
@@ -55,30 +53,23 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
u16 vport,
+ enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
- if (next_ft) {
- MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
- }
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
@@ -87,10 +78,23 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ switch (op_mod) {
+ case FS_FT_OP_MOD_NORMAL:
+ if (next_ft) {
+ MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
+ MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+ }
+ break;
+
+ case FS_FT_OP_MOD_LAG_DEMUX:
+ MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
+ if (next_ft)
+ MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
+ next_ft->id);
+ break;
+ }
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*table_id = MLX5_GET(create_flow_table_out, out,
table_id);
@@ -100,11 +104,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -115,39 +116,49 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
- if (ft->vport) {
- MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
- MLX5_SET(modify_flow_table_in, in, other_vport, 1);
- }
- MLX5_SET(modify_flow_table_in, in, modify_field_select,
- MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
- if (next_ft) {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id);
+
+ if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
+ MLX5_SET(modify_flow_table_in, in, modify_field_select,
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
+ if (next_ft) {
+ MLX5_SET(modify_flow_table_in, in,
+ lag_master_next_table_id, next_ft->id);
+ } else {
+ MLX5_SET(modify_flow_table_in, in,
+ lag_master_next_table_id, 0);
+ }
} else {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+ if (ft->vport) {
+ MLX5_SET(modify_flow_table_in, in, vport_number,
+ ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+ }
+ MLX5_SET(modify_flow_table_in, in, modify_field_select,
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
+ if (next_ft) {
+ MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
+ MLX5_SET(modify_flow_table_in, in, table_miss_id,
+ next_ft->id);
+ } else {
+ MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+ }
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
@@ -155,12 +166,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
u32 *in,
unsigned int *group_id)
{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
int err;
- memset(out, 0, sizeof(out));
-
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
@@ -170,13 +179,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec_check_status(dev, in,
- inlen, out,
- sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out,
group_id);
-
return err;
}
@@ -184,11 +190,8 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
@@ -200,8 +203,7 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -212,7 +214,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
{
unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
- u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5_flow_rule *dst;
void *in_flow_context;
void *in_match_value;
@@ -290,11 +292,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
- sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
kvfree(in);
-
return err;
}
@@ -303,7 +302,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
unsigned group_id,
struct fs_fte *fte)
{
- return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+ return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
@@ -327,12 +326,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int index)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
- int err;
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
@@ -343,74 +338,55 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
-
- return err;
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- return err;
-
- *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
-
- return 0;
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ return err;
}
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)];
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
void *stats;
int err = 0;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
-
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
-
return 0;
}
@@ -448,18 +424,14 @@ void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- b->out, b->outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
@@ -480,3 +452,51 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
+
+#define MAX_ENCAP_SIZE (128)
+
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
+ (MAX_ENCAP_SIZE / sizeof(u32))];
+ void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
+ encap_header);
+ void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
+ encap_header);
+ int inlen = header - (void *)in + size;
+ int err;
+
+ if (size > MAX_ENCAP_SIZE)
+ return -EINVAL;
+
+ memset(in, 0, inlen);
+ MLX5_SET(alloc_encap_header_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
+ MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
+ MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
+ memcpy(header, encap_header, size);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+ return err;
+}
+
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(dealloc_encap_header_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
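/*
 * Editor's note (illustrative, not part of the commit): every conversion in
 * this file follows one pattern -- size the mailboxes in 32-bit words with
 * MLX5_ST_SZ_DW(), zero them with a "= {0}" initializer instead of a
 * separate memset(), fill fields with MLX5_SET(), and call mlx5_cmd_exec(),
 * which in this series also performs the status-to-errno translation that
 * mlx5_cmd_exec_check_status() used to do. In minimal form:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */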
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 158844cef82b..c5bc4686c832 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -35,6 +35,7 @@
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
u16 vport,
+ enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
@@ -88,4 +89,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u16 id,
u64 *packets, u64 *bytes);
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id);
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3d6c1f65e586..5da2cc878582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -96,6 +96,10 @@
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -111,12 +115,16 @@ static struct init_tree_node {
int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 6,
+ .ar_size = 7,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+ LAG_PRIO_NUM_LEVELS))),
ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
@@ -345,7 +353,7 @@ static void del_flow_table(struct fs_node *node)
err = mlx5_cmd_destroy_flow_table(dev, ft);
if (err)
- pr_warn("flow steering can't destroy ft\n");
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
}
@@ -364,7 +372,7 @@ static void del_rule(struct fs_node *node)
match_value = mlx5_vzalloc(match_len);
if (!match_value) {
- pr_warn("failed to allocate inbox\n");
+ mlx5_core_warn(dev, "failed to allocate inbox\n");
return;
}
@@ -387,8 +395,9 @@ static void del_rule(struct fs_node *node)
modify_mask,
fte);
if (err)
- pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
- __func__, fg->id, fte->index);
+ mlx5_core_warn(dev,
+ "%s can't del rule fg id=%d fte_index=%d\n",
+ __func__, fg->id, fte->index);
}
kvfree(match_value);
}
@@ -409,8 +418,9 @@ static void del_fte(struct fs_node *node)
err = mlx5_cmd_delete_fte(dev, ft,
fte->index);
if (err)
- pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
fte->status = 0;
fg->num_ftes--;
@@ -427,8 +437,8 @@ static void del_flow_group(struct fs_node *node)
dev = get_dev(&ft->node);
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- pr_warn("flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
}
static struct fs_fte *alloc_fte(u8 action,
@@ -475,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
}
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
- enum fs_flow_table_type table_type)
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod)
{
struct mlx5_flow_table *ft;
@@ -485,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->level = level;
ft->node.type = FS_TYPE_FLOW_TABLE;
+ ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
ft->max_fte = max_fte;
@@ -722,6 +734,7 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
}
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ enum fs_flow_table_op_mod op_mod,
u16 vport, int prio,
int max_fte, u32 level)
{
@@ -754,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
level += fs_prio->start_level;
ft = alloc_flow_table(level,
vport,
- roundup_pow_of_two(max_fte),
- root->table_type);
+ max_fte ? roundup_pow_of_two(max_fte) : 0,
+ root->table_type,
+ op_mod);
if (!ft) {
err = -ENOMEM;
goto unlock_root;
}
tree_init_node(&ft->node, 1, del_flow_table);
- log_table_sz = ilog2(ft->max_fte);
+ log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
- log_table_sz, next_ft, &ft->id);
+ err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
+ ft->level, log_table_sz, next_ft, &ft->id);
if (err)
goto free_ft;
@@ -792,15 +806,26 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level)
{
- return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+ return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
+ max_fte, level);
}
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level, u16 vport)
{
- return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+ return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
+ max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+ struct mlx5_flow_namespace *ns,
+ int prio, u32 level)
+{
+ return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
+ level);
}
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
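/*
 * Editor's note (illustrative sketch, not part of the commit): a LAG demux
 * table lives in the new MLX5_FLOW_NAMESPACE_LAG namespace and is created
 * with max_fte == 0, so a log_size of 0 is reported to firmware:
 *
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_LAG);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */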
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -1379,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_LAG:
case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL:
@@ -1401,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->esw_ingress_root_ns->ns;
else
return NULL;
+ case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+ if (steering->sniffer_rx_root_ns)
+ return &steering->sniffer_rx_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+ if (steering->sniffer_tx_root_ns)
+ return &steering->sniffer_tx_root_ns->ns;
+ else
+ return NULL;
default:
return NULL;
}
@@ -1700,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->esw_egress_root_ns);
cleanup_root_ns(steering->esw_ingress_root_ns);
cleanup_root_ns(steering->fdb_root_ns);
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
mlx5_cleanup_fc_stats(dev);
kfree(steering);
}
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+ if (!steering->sniffer_tx_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+ if (!steering->sniffer_rx_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
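/*
 * Editor's note (illustrative sketch, not part of the commit): both sniffer
 * roots are single-prio namespaces, reachable through the namespace types
 * added to mlx5_get_flow_namespace() above, e.g.:
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_SNIFFER_RX);
 *	if (ns)
 *		ft = mlx5_create_flow_table(ns, 0, max_fte, 0);
 */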
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
@@ -1800,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
}
}
+ if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+ err = init_sniffer_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+ err = init_sniffer_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
mlx5_cleanup_fs(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 9cffb6aeb4e9..71ff03bceabb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,13 @@ enum fs_flow_table_type {
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+};
+
+enum fs_flow_table_op_mod {
+ FS_FT_OP_MOD_NORMAL,
+ FS_FT_OP_MOD_LAG_DEMUX,
};
enum fs_fte_status {
@@ -61,6 +68,8 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
+ struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
};
struct fs_node {
@@ -93,6 +102,7 @@ struct mlx5_flow_table {
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
+ enum fs_flow_table_op_mod op_mod;
struct {
bool active;
unsigned int required_groups;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 77fc1aa26114..5718aada6605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -38,13 +38,10 @@
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_board_id(struct mlx5_core_dev *dev)
@@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_init_hca_mbox_in in;
- struct mlx5_cmd_init_hca_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
- return err;
+ MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_teardown_hca_mbox_in in;
- struct mlx5_cmd_teardown_hca_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
new file mode 100644
index 000000000000..55957246c0e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+enum {
+ MLX5_LAG_FLAG_BONDED = 1 << 0,
+};
+
+struct lag_func {
+ struct mlx5_core_dev *dev;
+ struct net_device *netdev;
+};
+
+/* Used for collection of netdev event info. */
+struct lag_tracker {
+ enum netdev_lag_tx_type tx_type;
+ struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
+ bool is_bonded;
+};
+
+/* LAG data of a ConnectX card.
+ * It serves both of the card's physical functions (PFs).
+ */
+struct mlx5_lag {
+ u8 flags;
+ u8 v2p_map[MLX5_MAX_PORTS];
+ struct lag_func pf[MLX5_MAX_PORTS];
+ struct lag_tracker tracker;
+ struct delayed_work bond_work;
+ struct notifier_block nb;
+};
+
+/* General purpose mutex; hold it only for short periods of time.
+ * Beware of lock dependencies (preferably, no locks should be acquired
+ * under it).
+ */
+static DEFINE_MUTEX(lag_mutex);
+
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+ u8 remap_port2)
+{
+ u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+ void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
+
+ MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
+
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+ u8 remap_port2)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+ void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+ MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+ MLX5_SET(modify_lag_in, in, field_select, 0x1);
+
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
+
+ MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+
+ MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
+
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+
+ MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+
+static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+{
+ return dev->priv.lag;
+}
+
+static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
+ struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].netdev == ndev)
+ return i;
+
+ return -1;
+}
+
+static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+}
+
+static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+ u8 *port1, u8 *port2)
+{
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ if (tracker->netdev_state[0].tx_enabled) {
+ *port1 = 1;
+ *port2 = 1;
+ } else {
+ *port1 = 2;
+ *port2 = 2;
+ }
+ } else {
+ *port1 = 1;
+ *port2 = 2;
+ if (!tracker->netdev_state[0].link_up)
+ *port1 = 2;
+ else if (!tracker->netdev_state[1].link_up)
+ *port2 = 1;
+ }
+}
+
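/*
 * Editor's note (derived from the function above, not part of the commit):
 * for the two-port case the resulting virtual-to-physical map is:
 *
 *	mode		port 1		port 2		{*port1, *port2}
 *	active-backup	tx_enabled	-		{1, 1}
 *	active-backup	!tx_enabled	-		{2, 2}
 *	hash		link up		link up		{1, 2}
 *	hash		link down	link up		{2, 2}
 *	hash		link up		link down	{1, 1}
 */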
+static void mlx5_activate_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
+
+ ldev->flags |= MLX5_LAG_FLAG_BONDED;
+
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
+ &ldev->v2p_map[1]);
+
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to create LAG (%d)\n",
+ err);
+}
+
+static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
+
+ ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+
+ err = mlx5_cmd_destroy_lag(dev0);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to destroy LAG (%d)\n",
+ err);
+}
+
+static void mlx5_do_bond(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+ struct lag_tracker tracker;
+ u8 v2p_port1, v2p_port2;
+ int i, err;
+
+ if (!dev0 || !dev1)
+ return;
+
+ mutex_lock(&lag_mutex);
+ tracker = ldev->tracker;
+ mutex_unlock(&lag_mutex);
+
+ if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
+ if (mlx5_sriov_is_enabled(dev0) ||
+ mlx5_sriov_is_enabled(dev1)) {
+ mlx5_core_warn(dev0, "LAG is not supported with SRIOV\n");
+ return;
+ }
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
+
+ mlx5_activate_lag(ldev, &tracker);
+
+ mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_enable_roce(dev1);
+ } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
+ &v2p_port2);
+
+ if ((v2p_port1 != ldev->v2p_map[0]) ||
+ (v2p_port2 != ldev->v2p_map[1])) {
+ ldev->v2p_map[0] = v2p_port1;
+ ldev->v2p_map[1] = v2p_port2;
+
+ err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ }
+ } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_disable_roce(dev1);
+
+ mlx5_deactivate_lag(ldev);
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
+ }
+}
+
+static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
+{
+ schedule_delayed_work(&ldev->bond_work, delay);
+}
+
+static void mlx5_do_bond_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
+ bond_work);
+ int status;
+
+ status = mlx5_dev_list_trylock();
+ if (!status) {
+ /* 1 sec delay. */
+ mlx5_queue_bond_work(ldev, HZ);
+ return;
+ }
+
+ mlx5_do_bond(ldev);
+ mlx5_dev_list_unlock();
+}
+
+static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *upper = info->upper_dev, *ndev_tmp;
+ struct netdev_lag_upper_info *lag_upper_info;
+ bool is_bonded;
+ int bond_status = 0;
+ int num_slaves = 0;
+ int idx;
+
+ if (!netif_is_lag_master(upper))
+ return 0;
+
+ lag_upper_info = info->upper_info;
+
+ /* The event may still be of interest if the slave does not belong to
+ * us, but is enslaved to a master which has one or more of our netdevs
+ * as slaves (e.g., if a new slave is added to a master that bonds two
+ * of our netdevs, we should unbond).
+ */
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+ if (idx > -1)
+ bond_status |= (1 << idx);
+
+ num_slaves++;
+ }
+ rcu_read_unlock();
+
+ /* None of this lagdev's netdevs are slaves of this master. */
+ if (!(bond_status & 0x3))
+ return 0;
+
+ if (lag_upper_info)
+ tracker->tx_type = lag_upper_info->tx_type;
+
+ /* Determine bonding status:
+ * A device is considered bonded if both its physical ports are slaves
+ * of the same lag master, and only them.
+ * Lag mode must be activebackup or hash.
+ */
+ is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
+ (bond_status == 0x3) &&
+ ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
+ (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+
+ if (tracker->is_bonded != is_bonded) {
+ tracker->is_bonded = is_bonded;
+ return 1;
+ }
+
+ return 0;
+}
+
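/*
 * Editor's note (derived from the function above, not part of the commit):
 * bond_status is a bitmap with bit i set when our netdev i is enslaved to
 * this upper device, so with MLX5_MAX_PORTS == 2:
 *
 *	bond_status 0x1 or 0x2 -> only one of our ports is a slave
 *	bond_status 0x3        -> both ports are slaves; is_bonded also
 *	                          requires num_slaves == 2 and an
 *	                          active-backup or hash TX type
 */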
+static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag_lower_info;
+ int idx;
+
+ if (!netif_is_lag_port(ndev))
+ return 0;
+
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
+ if (idx == -1)
+ return 0;
+
+ /* This information is used to determine virtual to physical
+ * port mapping.
+ */
+ lag_lower_info = info->lower_state_info;
+ if (!lag_lower_info)
+ return 0;
+
+ tracker->netdev_state[idx] = *lag_lower_info;
+
+ return 1;
+}
+
+static int mlx5_lag_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct lag_tracker tracker;
+ struct mlx5_lag *ldev;
+ int changed = 0;
+
+ if (!net_eq(dev_net(ndev), &init_net))
+ return NOTIFY_DONE;
+
+ if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+ return NOTIFY_DONE;
+
+ ldev = container_of(this, struct mlx5_lag, nb);
+ tracker = ldev->tracker;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
+ ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
+ ndev, ptr);
+ break;
+ }
+
+ mutex_lock(&lag_mutex);
+ ldev->tracker = tracker;
+ mutex_unlock(&lag_mutex);
+
+ if (changed)
+ mlx5_queue_bond_work(ldev, 0);
+
+ return NOTIFY_DONE;
+}
+
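/*
 * Editor's note (derived from the code above, not part of the commit): the
 * event flow defers all firmware work out of the notifier:
 *
 *	mlx5_lag_netdev_event()        snapshots state into ldev->tracker
 *	  -> mlx5_queue_bond_work(0)   schedules mlx5_do_bond_work()
 *	    -> mlx5_do_bond()          creates, modifies or destroys the
 *	                               firmware LAG object under the
 *	                               device-list lock
 */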
+static struct mlx5_lag *mlx5_lag_dev_alloc(void)
+{
+ struct mlx5_lag *ldev;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return NULL;
+
+ INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+
+ return ldev;
+}
+
+static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
+{
+ kfree(ldev);
+}
+
+static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
+{
+ unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+
+ if (fn >= MLX5_MAX_PORTS)
+ return;
+
+ mutex_lock(&lag_mutex);
+ ldev->pf[fn].dev = dev;
+ ldev->pf[fn].netdev = netdev;
+ ldev->tracker.netdev_state[fn].link_up = 0;
+ ldev->tracker.netdev_state[fn].tx_enabled = 0;
+
+ dev->priv.lag = ldev;
+ mutex_unlock(&lag_mutex);
+}
+
+static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev == dev)
+ break;
+
+ if (i == MLX5_MAX_PORTS)
+ return;
+
+ mutex_lock(&lag_mutex);
+ memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
+
+ dev->priv.lag = NULL;
+ mutex_unlock(&lag_mutex);
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+ struct mlx5_lag *ldev = NULL;
+ struct mlx5_core_dev *tmp_dev;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+ return;
+
+ tmp_dev = mlx5_get_next_phys_dev(dev);
+ if (tmp_dev)
+ ldev = tmp_dev->priv.lag;
+
+ if (!ldev) {
+ ldev = mlx5_lag_dev_alloc();
+ if (!ldev) {
+ mlx5_core_err(dev, "Failed to alloc lag dev\n");
+ return;
+ }
+ }
+
+ mlx5_lag_dev_add_pf(ldev, dev, netdev);
+
+ if (!ldev->nb.notifier_call) {
+ ldev->nb.notifier_call = mlx5_lag_netdev_event;
+ if (register_netdevice_notifier(&ldev->nb)) {
+ ldev->nb.notifier_call = NULL;
+ mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
+ }
+ }
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_remove(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ int i;
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev)
+ return;
+
+ if (mlx5_lag_is_bonded(ldev))
+ mlx5_deactivate_lag(ldev);
+
+ mlx5_lag_dev_remove_pf(ldev, dev);
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ break;
+
+ if (i == MLX5_MAX_PORTS) {
+ if (ldev->nb.notifier_call)
+ unregister_netdevice_notifier(&ldev->nb);
+ cancel_delayed_work_sync(&ldev->bond_work);
+ mlx5_lag_dev_free(ldev);
+ }
+}
+
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ bool res;
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+ res = ldev && mlx5_lag_is_bonded(ldev);
+ mutex_unlock(&lag_mutex);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_active);
+
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+
+ if (!(ldev && mlx5_lag_is_bonded(ldev)))
+ goto unlock;
+
+ if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ ndev = ldev->tracker.netdev_state[0].tx_enabled ?
+ ldev->pf[0].netdev : ldev->pf[1].netdev;
+ } else {
+ ndev = ldev->pf[0].netdev;
+ }
+ if (ndev)
+ dev_hold(ndev);
+
+unlock:
+ mutex_unlock(&lag_mutex);
+
+ return ndev;
+}
+EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
+ priv);
+ struct mlx5_lag *ldev;
+
+ if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
+ return true;
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+ return true;
+
+ /* If bonded, we do not add an IB device for PF1. */
+ return false;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index 1368dac00da0..3a3b0005fd2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -39,36 +39,33 @@
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
- struct mlx5_mad_ifc_mbox_in *in = NULL;
- struct mlx5_mad_ifc_mbox_out *out = NULL;
- int err;
+ int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+ int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+ int err = -ENOMEM;
+ void *data;
+ void *resp;
+ u32 *out;
+ u32 *in;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- out = kzalloc(sizeof(*out), GFP_KERNEL);
- if (!out) {
- err = -ENOMEM;
+ in = kzalloc(inlen, GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!in || !out)
goto out;
- }
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
- in->hdr.opmod = cpu_to_be16(opmod);
- in->port = port;
+ MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+ MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+ MLX5_SET(mad_ifc_in, in, port, port);
- memcpy(in->data, inb, sizeof(in->data));
+ data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+ memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
- err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out;
- }
-
- memcpy(outb, out->data, sizeof(out->data));
+ resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+ memcpy(outb, resp,
+ MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2385bae92672..d9c3c70b29e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -72,16 +72,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(intf_mutex);
-
-struct mlx5_device_context {
- struct list_head list;
- struct mlx5_interface *intf;
- void *context;
-};
-
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -324,7 +314,7 @@ enum {
MLX5_DEV_CAP_FLAG_DCT,
};
-static u16 to_fw_pkey_sz(u32 size)
+static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
case 128:
@@ -340,7 +330,7 @@ static u16 to_fw_pkey_sz(u32 size)
case 4096:
return 5;
default:
- pr_warn("invalid pkey table size %d\n", size);
+ mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
return 0;
}
}
@@ -363,10 +353,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
- if (err)
- goto query_ex;
-
- err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@@ -409,20 +395,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
- u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
- int err;
-
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
- err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- if (err)
- return err;
-
- err = mlx5_cmd_status_to_err_v2(out);
-
- return err;
+ return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
@@ -490,7 +467,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
- to_fw_pkey_sz(128));
+ to_fw_pkey_sz(dev, 128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -528,37 +505,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
- int err;
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
- memset(out, 0, sizeof(out));
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err_v2(out);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
- int err;
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err_v2(out);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
@@ -758,44 +720,40 @@ clean:
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
- u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
- u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
- u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
- u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
- int err;
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
u32 sup_issi;
-
- memset(query_in, 0, sizeof(query_in));
- memset(query_out, 0, sizeof(query_out));
+ int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
-
- err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
- query_out, sizeof(query_out));
+ err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
if (err) {
- if (((struct mlx5_outbox_hdr *)query_out)->status ==
- MLX5_CMD_STAT_BAD_OP_ERR) {
+ u32 syndrome;
+ u8 status;
+
+ mlx5_cmd_mbox_status(query_out, &status, &syndrome);
+ if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
return 0;
}
- pr_err("failed to query ISSI\n");
+ pr_err("failed to query ISSI err(%d)\n", err);
return err;
}
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
- memset(set_in, 0, sizeof(set_in));
- memset(set_out, 0, sizeof(set_out));
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
-
- err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
- set_out, sizeof(set_out));
+ err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
if (err) {
- pr_err("failed to set ISSI=1\n");
+ pr_err("failed to set ISSI=1 err(%d)\n", err);
return err;
}
@@ -809,120 +767,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -ENOTSUPP;
}
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx)
- return;
-
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(dev);
-
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- }
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
-
- intf->remove(dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
-}
-
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
- mutex_unlock(&intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
- mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
@@ -995,8 +839,102 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
debugfs_remove(priv->dbg_root);
}
-#define MLX5_IB_MOD "mlx5_ib"
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto out;
+ }
+
+ err = mlx5_query_board_id(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query board id failed\n");
+ goto out;
+ }
+
+ err = mlx5_eq_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize eq\n");
+ goto out;
+ }
+
+ MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
+
+ err = mlx5_init_cq_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize cq table\n");
+ goto err_eq_cleanup;
+ }
+
+ mlx5_init_qp_table(dev);
+
+ mlx5_init_srq_table(dev);
+
+ mlx5_init_mkey_table(dev);
+
+ err = mlx5_init_rl_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ goto err_tables_cleanup;
+ }
+
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+ goto err_rl_cleanup;
+ }
+#endif
+
+ err = mlx5_sriov_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+ goto err_eswitch_cleanup;
+ }
+
+ return 0;
+
+err_eswitch_cleanup:
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+
+err_rl_cleanup:
+#endif
+ mlx5_cleanup_rl_table(dev);
+
+err_tables_cleanup:
+ mlx5_cleanup_mkey_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+
+err_eq_cleanup:
+ mlx5_eq_cleanup(dev);
+
+out:
+ return err;
+}
+
+static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+{
+ mlx5_sriov_cleanup(dev);
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+#endif
+ mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_mkey_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ mlx5_eq_cleanup(dev);
+}
+
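/*
 * Editor's note (illustrative, not part of the commit): the load path is
 * now split so software state is created once and reused across attach /
 * detach cycles (e.g. PCI error recovery):
 *
 *	probe:    mlx5_load_one(dev, priv, true)     calls mlx5_init_once()
 *	recover:  mlx5_load_one(dev, priv, false)    reuses the SW objects
 *	remove:   mlx5_unload_one(dev, priv, true)   calls mlx5_cleanup_once()
 */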
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ bool boot)
{
struct pci_dev *pdev = dev->pdev;
int err;
@@ -1029,12 +967,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto out_err;
}
- mlx5_pagealloc_init(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
dev_err(&pdev->dev, "enable hca failed\n");
- goto err_pagealloc_cleanup;
+ goto err_cmd_cleanup;
}
err = mlx5_core_set_issi(dev);
@@ -1087,34 +1023,21 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_start_health_poll(dev);
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto err_stop_poll;
- }
-
- err = mlx5_query_board_id(dev);
- if (err) {
- dev_err(&pdev->dev, "query board id failed\n");
+ if (boot && mlx5_init_once(dev, priv)) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
}
err = mlx5_enable_msix(dev);
if (err) {
dev_err(&pdev->dev, "enable msix failed\n");
- goto err_stop_poll;
- }
-
- err = mlx5_eq_init(dev);
- if (err) {
- dev_err(&pdev->dev, "failed to initialize eq\n");
- goto disable_msix;
+ goto err_cleanup_once;
}
err = mlx5_alloc_uuars(dev, &priv->uuari);
if (err) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
- goto err_eq_cleanup;
+ goto err_disable_msix;
}
err = mlx5_start_eqs(dev);
@@ -1130,15 +1053,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
}
err = mlx5_irq_set_affinity_hints(dev);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-
- MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
- mlx5_init_cq_table(dev);
- mlx5_init_qp_table(dev);
- mlx5_init_srq_table(dev);
- mlx5_init_mkey_table(dev);
+ goto err_affinity_hints;
+ }
err = mlx5_init_fs(dev);
if (err) {
@@ -1146,36 +1064,26 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_fs;
}
- err = mlx5_init_rl_table(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init rate limiting\n");
- goto err_rl;
- }
-
#ifdef CONFIG_MLX5_CORE_EN
- err = mlx5_eswitch_init(dev);
- if (err) {
- dev_err(&pdev->dev, "eswitch init failed %d\n", err);
- goto err_reg_dev;
- }
+ mlx5_eswitch_attach(dev->priv.eswitch);
#endif
- err = mlx5_sriov_init(dev);
+ err = mlx5_sriov_attach(dev);
if (err) {
dev_err(&pdev->dev, "sriov init failed %d\n", err);
goto err_sriov;
}
- err = mlx5_register_device(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto err_reg_dev;
+ if (mlx5_device_registered(dev)) {
+ mlx5_attach_device(dev);
+ } else {
+ err = mlx5_register_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+ goto err_reg_dev;
+ }
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
-
clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
@@ -1183,23 +1091,19 @@ out:
return 0;
-err_sriov:
- if (mlx5_sriov_cleanup(dev))
- dev_err(&dev->pdev->dev, "sriov cleanup failed\n");
+err_reg_dev:
+ mlx5_sriov_detach(dev);
+err_sriov:
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_eswitch_detach(dev->priv.eswitch);
#endif
-err_reg_dev:
- mlx5_cleanup_rl_table(dev);
-err_rl:
mlx5_cleanup_fs(dev);
+
err_fs:
- mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
- mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
@@ -1208,12 +1112,13 @@ err_stop_eqs:
err_free_uar:
mlx5_free_uuars(dev, &priv->uuari);
-err_eq_cleanup:
- mlx5_eq_cleanup(dev);
-
-disable_msix:
+err_disable_msix:
mlx5_disable_msix(dev);
+err_cleanup_once:
+ if (boot)
+ mlx5_cleanup_once(dev);
+
err_stop_poll:
mlx5_stop_health_poll(dev);
if (mlx5_cmd_teardown_hca(dev)) {
@@ -1230,8 +1135,7 @@ reclaim_boot_pages:
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
-err_pagealloc_cleanup:
- mlx5_pagealloc_cleanup(dev);
+err_cmd_cleanup:
mlx5_cmd_cleanup(dev);
out_err:
@@ -1241,40 +1145,35 @@ out_err:
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ bool cleanup)
{
int err = 0;
- err = mlx5_sriov_cleanup(dev);
- if (err) {
- dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n",
- __func__);
- return err;
- }
-
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
+ if (cleanup)
+ mlx5_cleanup_once(dev);
goto out;
}
- mlx5_unregister_device(dev);
+
+ if (mlx5_device_registered(dev))
+ mlx5_detach_device(dev);
+
+ mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_eswitch_detach(dev->priv.eswitch);
#endif
-
- mlx5_cleanup_rl_table(dev);
mlx5_cleanup_fs(dev);
- mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
- mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
- mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
+ if (cleanup)
+ mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
@@ -1284,7 +1183,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
- mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
out:
@@ -1294,22 +1192,6 @@ out:
return err;
}
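
[editor's note] The new boot/cleanup flags split the software lifetime from the hardware one: objects built by mlx5_init_once() survive PCI error recovery, while the HCA itself is torn down and re-attached. The pairing implied by the call sites later in this patch:

	/* From the hunks below in this file:
	 *   init_one()              -> mlx5_load_one(dev, priv, true)
	 *   remove_one()            -> mlx5_unload_one(dev, priv, true)
	 *   mlx5_pci_err_detected() -> mlx5_unload_one(dev, priv, false)
	 *   mlx5_pci_resume()       -> mlx5_load_one(dev, priv, false)
	 *   shutdown()              -> mlx5_unload_one(dev, priv, false)
	 */
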
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event)
- dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
struct mlx5_core_event_handler {
void (*event)(struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
@@ -1323,6 +1205,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
};
+#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1344,8 +1227,9 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
- pr_warn("selected profile out of range, selecting default (%d)\n",
- MLX5_DEFAULT_PROF);
+ mlx5_core_warn(dev,
+ "selected profile out of range, selecting default (%d)\n",
+ MLX5_DEFAULT_PROF);
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
@@ -1368,12 +1252,18 @@ static int init_one(struct pci_dev *pdev,
goto close_pci;
}
- err = mlx5_load_one(dev, priv);
+ mlx5_pagealloc_init(dev);
+
+ err = mlx5_load_one(dev, priv, true);
if (err) {
dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
goto clean_health;
}
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
err = devlink_register(devlink, &pdev->dev);
if (err)
goto clean_load;
@@ -1381,8 +1271,9 @@ static int init_one(struct pci_dev *pdev,
return 0;
clean_load:
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, true);
clean_health:
+ mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
@@ -1400,11 +1291,15 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_priv *priv = &dev->priv;
devlink_unregister(devlink);
- if (mlx5_unload_one(dev, priv)) {
+ mlx5_unregister_device(dev);
+
+ if (mlx5_unload_one(dev, priv, true)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
return;
}
+
+ mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
pci_set_drvdata(pdev, NULL);
@@ -1419,7 +1314,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev);
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, false);
pci_save_state(pdev);
mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
@@ -1491,7 +1386,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, priv);
+ err = mlx5_load_one(dev, priv, false);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
, __func__, err);
@@ -1513,7 +1408,7 @@ static void shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, false);
mlx5_pci_disable_device(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d5a0c2d61a18..ba2b09cc192f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -37,70 +37,30 @@
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
-struct mlx5_attach_mcg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- __be32 rsvd;
- u8 gid[16];
-};
-
-struct mlx5_attach_mcg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvf[8];
-};
-
-struct mlx5_detach_mcg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- __be32 rsvd;
- u8 gid[16];
-};
-
-struct mlx5_detach_mcg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvf[8];
-};
-
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- struct mlx5_attach_mcg_mbox_in in;
- struct mlx5_attach_mcg_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
+ void *gid;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
- memcpy(in.gid, mgid, sizeof(*mgid));
- in.qpn = cpu_to_be32(qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- struct mlx5_detach_mcg_mbox_in in;
- struct mlx5_detach_mcg_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
- memcpy(in.gid, mgid, sizeof(*mgid));
- in.qpn = cpu_to_be32(qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
+ void *gid;
- return err;
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
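
[editor's note] Both converted commands follow the same new idiom: stack buffers sized by MLX5_ST_SZ_DW(), fields stamped with MLX5_SET()/MLX5_ADDR_OF(), and mlx5_cmd_exec() doing the status check every caller used to open-code. A rough user-space model of what such setters do (the real macros derive bit offsets from mlx5_ifc.h layouts; the field positions below are invented purely for illustration):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* htonl/ntohl */

	/* Toy layout: dword 0 holds a 16-bit opcode in its high bits,
	 * dword 1 holds a 24-bit qpn in its low bits. */
	#define TOY_SET_OPCODE(buf, v) \
		((buf)[0] = htonl((ntohl((buf)[0]) & 0x0000ffff) | ((uint32_t)(v) << 16)))
	#define TOY_SET_QPN(buf, v) \
		((buf)[1] = htonl((ntohl((buf)[1]) & 0xff000000) | ((uint32_t)(v) & 0xffffff)))

	int main(void)
	{
		uint32_t in[4] = {0};	/* like u32 in[MLX5_ST_SZ_DW(...)] = {0}; */

		TOY_SET_OPCODE(in, 0x806);	/* e.g. an attach opcode */
		TOY_SET_QPN(in, 0x1234);
		printf("dw0=0x%08x dw1=0x%08x\n", ntohl(in[0]), ntohl(in[1]));
		return 0;
	}
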
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 2f86ec6fcf25..3d0cfb9f18f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -58,8 +58,8 @@ do { \
} while (0)
#define mlx5_core_err(__dev, format, ...) \
- dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
- (__dev)->priv.name, __func__, __LINE__, current->pid, \
+ dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
@@ -75,19 +75,6 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
-static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
- int in_size, u32 *out,
- int out_size)
-{
- int err;
-
- err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
-}
-
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
@@ -96,7 +83,12 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
+int mlx5_sriov_attach(struct mlx5_core_dev *dev);
+void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
@@ -105,7 +97,38 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
+
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_attach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev);
+bool mlx5_device_registered(struct mlx5_core_dev *dev);
+int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_unregister_device(struct mlx5_core_dev *dev);
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+void mlx5_dev_list_lock(void);
+void mlx5_dev_list_unlock(void);
+int mlx5_dev_list_trylock(void);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+
void mlx5e_init(void);
void mlx5e_cleanup(void);
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+ /* LACP owner conditions:
+ * 1) Function is physical.
+ * 2) LAG is supported by FW.
+ * 3) LAG is managed by driver (currently the only option).
+ */
+ return MLX5_CAP_GEN(dev, vport_group_manager) &&
+ (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+ MLX5_CAP_GEN(dev, lag_master);
+}
+
#endif /* __MLX5_CORE_H__ */
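
[editor's note] mlx5_lag_is_lacp_owner() folds three firmware capabilities into one predicate. A self-contained sketch of the same check against a toy capability struct (field names mirror the patch, values are invented):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for three MLX5_CAP_GEN() reads. */
	struct toy_caps {
		bool vport_group_manager;
		int  num_lag_ports;
		bool lag_master;
	};

	static bool is_lacp_owner(const struct toy_caps *c)
	{
		return c->vport_group_manager && c->num_lag_ports > 1 &&
		       c->lag_master;
	}

	int main(void)
	{
		struct toy_caps c = { true, 2, true };

		printf("lacp owner: %d\n", is_lacp_owner(&c));
		return 0;
	}
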
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 77a7293921d5..b9736f505bdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
{
}
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out)
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
- struct mlx5_create_mkey_mbox_out lout;
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ u32 mkey_index;
+ void *mkc;
int err;
u8 key;
- memset(&lout, 0, sizeof(lout));
spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
spin_unlock_irq(&dev->priv.mkey_lock);
- in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
- if (callback) {
- err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
- callback, context);
- return err;
- } else {
- err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
- }
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- if (err) {
- mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
- return err;
- }
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
- if (lout.hdr.status) {
- mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
- return mlx5_cmd_status_to_err(&lout.hdr);
- }
+ if (callback)
+ return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+ callback, context);
+
+ err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
+ if (err)
+ return err;
- mkey->iova = be64_to_cpu(in->seg.start_addr);
- mkey->size = be64_to_cpu(in->seg.len);
- mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
- mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+ mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ mkey->size = MLX5_GET64(mkc, mkc, len);
+ mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+ mkey->pd = MLX5_GET(mkc, mkc, pd);
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
- be32_to_cpu(lout.mkey), key, mkey->key);
+ mkey_index, key, mkey->key);
/* connect to mkey tree */
write_lock_irq(&table->lock);
@@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
+
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
+{
+ return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+ NULL, 0, NULL, NULL);
+}
EXPORT_SYMBOL(mlx5_core_create_mkey);
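
[editor's note] The returned handle now comes straight from create_mkey_out: a 24-bit index from firmware combined with the driver's 8-bit rolling key, so a recycled index yields a different handle than the stale one. A worked sketch of that composition (the left shift is assumed from mlx5_idx_to_mkey(), which this hunk does not show):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t idx_to_mkey(uint32_t idx)
	{
		return idx << 8;	/* shift assumed, not shown in the patch */
	}

	int main(void)
	{
		uint32_t mkey_index = 0x000abc;	/* from create_mkey_out */
		uint8_t  key = 0x5a;		/* per-device rolling counter */
		uint32_t mkey = idx_to_mkey(mkey_index) | key;

		printf("mkey = 0x%08x\n", mkey);	/* 0x000abc5a */
		return 0;
	}
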
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
- struct mlx5_destroy_mkey_mbox_in in;
- struct mlx5_destroy_mkey_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
write_lock_irqsave(&table->lock, flags);
deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
@@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
return -ENOENT;
}
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_mkey_mbox_in in;
- int err;
+ u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
- memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
+ MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey)
{
- struct mlx5_query_special_ctxs_mbox_in in;
- struct mlx5_query_special_ctxs_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *mkey = be32_to_cpu(out.dump_fill_mkey);
-
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
+static inline u32 mlx5_get_psv(u32 *out, int psv_index)
+{
+ switch (psv_index) {
+ case 1: return MLX5_GET(create_psv_out, out, psv1_index);
+ case 2: return MLX5_GET(create_psv_out, out, psv2_index);
+ case 3: return MLX5_GET(create_psv_out, out, psv3_index);
+ default: return MLX5_GET(create_psv_out, out, psv0_index);
+ }
+}
+
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
- struct mlx5_allocate_psv_in in;
- struct mlx5_allocate_psv_out out;
+ u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
return -EINVAL;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
+ MLX5_SET(create_psv_in, in, pd, pdn);
+ MLX5_SET(create_psv_in, in, num_psv, npsvs);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
- in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err) {
- mlx5_core_err(dev, "cmd exec failed %d\n", err);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
return err;
- }
-
- if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n",
- out.hdr.status);
- return mlx5_cmd_status_to_err(&out.hdr);
- }
for (i = 0; i < npsvs; i++)
- sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
+ sig_index[i] = mlx5_get_psv(out, i);
return err;
}
@@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
- struct mlx5_destroy_psv_in in;
- struct mlx5_destroy_psv_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
- in.psv_number = cpu_to_be32(psv_num);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err) {
- mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
- goto out;
- }
-
- if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n",
- out.hdr.status);
- err = mlx5_cmd_status_to_err(&out.hdr);
- goto out;
- }
-
-out:
- return err;
+ MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
+ MLX5_SET(destroy_psv_in, in, psvn, psv_num);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 32dea3524cee..d4585154151d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -44,12 +44,6 @@ enum {
MLX5_PAGES_TAKE = 2
};
-enum {
- MLX5_BOOT_PAGES = 1,
- MLX5_INIT_PAGES = 2,
- MLX5_POST_INIT_PAGES = 3
-};
-
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
@@ -67,33 +61,6 @@ struct fw_page {
unsigned free_count;
};
-struct mlx5_query_pages_inbox {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_pages_outbox {
- struct mlx5_outbox_hdr hdr;
- __be16 rsvd;
- __be16 func_id;
- __be32 num_pages;
-};
-
-struct mlx5_manage_pages_inbox {
- struct mlx5_inbox_hdr hdr;
- __be16 rsvd;
- __be16 func_id;
- __be32 num_entries;
- __be64 pas[0];
-};
-
-struct mlx5_manage_pages_outbox {
- struct mlx5_outbox_hdr hdr;
- __be32 num_entries;
- u8 rsvd[4];
- __be64 pas[0];
-};
-
enum {
MAX_RECLAIM_TIME_MSECS = 5000,
MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
@@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
- struct mlx5_query_pages_inbox in;
- struct mlx5_query_pages_outbox out;
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
- in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+ MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+ MLX5_SET(query_pages_in, in, op_mod, boot ?
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *npages = be32_to_cpu(out.num_pages);
- *func_id = be16_to_cpu(out.func_id);
+ *npages = MLX5_GET(query_pages_out, out, num_pages);
+ *func_id = MLX5_GET(query_pages_out, out, function_id);
return err;
}
@@ -280,46 +244,37 @@ out_alloc:
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
- struct mlx5_manage_pages_inbox *in;
- struct mlx5_manage_pages_outbox out;
+ u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int err;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return;
-
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- in->func_id = cpu_to_be16(func_id);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
- if (!err)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
- mlx5_core_warn(dev, "page notify failed\n");
-
- kfree(in);
+ mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
+ func_id, err);
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
- struct mlx5_manage_pages_inbox *in;
- struct mlx5_manage_pages_outbox out;
- int inlen;
+ u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
int err;
+ u32 *in;
int i;
- inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
+ inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
goto out_free;
}
- memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
retry:
@@ -332,27 +287,21 @@ retry:
goto retry;
}
- in->pas[i] = cpu_to_be64(addr);
+ MLX5_SET64(manage_pages_in, in, pas[i], addr);
}
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
- in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be32(npages);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_4k;
}
- err = mlx5_cmd_status_to_err(&out.hdr);
- if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
- func_id, npages, out.hdr.status);
- goto out_4k;
- }
-
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
@@ -364,7 +313,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, be64_to_cpu(in->pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
if (notify_fail)
@@ -373,64 +322,67 @@ out_free:
}
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
- struct mlx5_manage_pages_inbox *in, int in_size,
- struct mlx5_manage_pages_outbox *out, int out_size)
+ u32 *in, int in_size, u32 *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
+ u32 func_id;
u32 npages;
u32 i = 0;
if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
- (u32 *)out, out_size);
+ return mlx5_cmd_exec(dev, in, in_size, out, out_size);
- npages = be32_to_cpu(in->num_entries);
+ /* No hard feelings, we want our pages back! */
+ npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+ func_id = MLX5_GET(manage_pages_in, in, function_id);
p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
- out->pas[i] = cpu_to_be64(fwp->addr);
p = rb_next(p);
+ if (fwp->func_id != func_id)
+ continue;
+
+ MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
i++;
}
- out->num_entries = cpu_to_be32(i);
+ MLX5_SET(manage_pages_out, out, output_num_entries, i);
return 0;
}
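
[editor's note] When the device is in internal error, reclaim_pages_cmd() no longer round-trips through dead firmware: it walks the driver's own page tree and synthesizes the response, now also filtering on function_id so one function's reclaim cannot return another function's pages. A toy version of that walk over a flat array (all data invented):

	#include <stdint.h>
	#include <stdio.h>

	struct toy_fw_page { uint64_t addr; uint16_t func_id; };

	int main(void)
	{
		/* Invented pages; func_id 0 is the PF, others are VFs. */
		struct toy_fw_page pages[] = {
			{ 0x1000, 0 }, { 0x2000, 3 }, { 0x3000, 3 }, { 0x4000, 1 },
		};
		uint64_t out[4] = {0};
		unsigned int i = 0, npages = 2, func_id = 3, p;

		for (p = 0; p < 4 && i < npages; p++) {
			if (pages[p].func_id != func_id)
				continue;	/* skip other functions' pages */
			out[i++] = pages[p].addr;
		}
		printf("claimed %u of %u pages for func %u\n", i, npages, func_id);
		return 0;
	}
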
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
- struct mlx5_manage_pages_inbox in;
- struct mlx5_manage_pages_outbox *out;
+ int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int num_claimed;
- int outlen;
- u64 addr;
+ u32 *out;
int err;
int i;
if (nclaimed)
*nclaimed = 0;
- memset(&in, 0, sizeof(in));
- outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
+ outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = mlx5_vzalloc(outlen);
if (!out)
return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
- in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be32(npages);
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
- num_claimed = be32_to_cpu(out->num_entries);
+ num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
if (num_claimed > npages) {
mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
num_claimed, npages);
@@ -438,10 +390,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
- for (i = 0; i < num_claimed; i++) {
- addr = be64_to_cpu(out->pas[i]);
- free_4k(dev, addr);
- }
+ for (i = 0; i < num_claimed; i++)
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+
if (nclaimed)
*nclaimed = num_claimed;
@@ -518,8 +469,8 @@ static int optimal_reclaimed_pages(void)
int ret;
ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
- sizeof(struct mlx5_manage_pages_outbox)) /
- FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
+ MLX5_ST_SZ_BYTES(manage_pages_out)) /
+ MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
return ret;
}
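
[editor's note] optimal_reclaimed_pages() just divides the space available in one command mailbox chain by the size of a 64-bit page address, after subtracting the fixed manage_pages_out header. Plugging in stand-in sizes (the real constants live in cmd.c and mlx5_ifc.h and are not shown in this hunk):

	#include <stdio.h>

	int main(void)
	{
		/* All sizes below are illustrative stand-ins, not the
		 * driver's real constants. */
		int inline_out = 16;	/* sizeof(lay->out) */
		int nblocks    = 8;	/* MLX5_BLKS_FOR_RECLAIM_PAGES */
		int block_data = 512;	/* sizeof(block->data) */
		int hdr        = 16;	/* MLX5_ST_SZ_BYTES(manage_pages_out) */
		int pas_sz     = 8;	/* one 64-bit page address */

		printf("pages per reclaim cmd: %d\n",
		       (inline_out + nblocks * block_data - hdr) / pas_sz);
		return 0;
	}
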
@@ -594,6 +545,12 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
int prev_vfs_pages = dev->priv.vfs_pages;
+ /* In case of internal error we will free the pages manually later */
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_core_warn(dev, "Skipping wait for vf pages stage");
+ return 0;
+ }
+
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
dev->priv.name);
while (dev->priv.vfs_pages) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index f2d3aee909e8..bd830d8d6c5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -36,66 +36,27 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-struct mlx5_alloc_pd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_pd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 pdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 pdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
- struct mlx5_alloc_pd_mbox_in in;
- struct mlx5_alloc_pd_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *pdn = be32_to_cpu(out.pdn) & 0xffffff;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
}
EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
- struct mlx5_dealloc_pd_mbox_in in;
- struct mlx5_dealloc_pd_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
- in.pdn = cpu_to_be32(pdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
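
[editor's note] The two PD helpers pair up in the obvious way; a hedged call-sequence sketch, not from the patch (kernel context assumed, error handling elided):

	u32 pdn;

	if (!mlx5_core_alloc_pd(dev, &pdn)) {
		/* ... create mkeys/QPs against pdn ... */
		mlx5_core_dealloc_pd(dev, pdn);
	}
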
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 752c08127138..34e7184e23c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -38,45 +38,42 @@
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
- u16 reg_num, int arg, int write)
+ u16 reg_id, int arg, int write)
{
- struct mlx5_access_reg_mbox_in *in = NULL;
- struct mlx5_access_reg_mbox_out *out = NULL;
+ int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
+ int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
int err = -ENOMEM;
+ u32 *out = NULL;
+ u32 *in = NULL;
+ void *data;
- in = mlx5_vzalloc(sizeof(*in) + size_in);
- if (!in)
- return -ENOMEM;
-
- out = mlx5_vzalloc(sizeof(*out) + size_out);
- if (!out)
- goto ex1;
-
- memcpy(in->data, data_in, size_in);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
- in->hdr.opmod = cpu_to_be16(!write);
- in->arg = cpu_to_be32(arg);
- in->register_id = cpu_to_be16(reg_num);
- err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
- sizeof(*out) + size_out);
- if (err)
- goto ex2;
+ in = mlx5_vzalloc(inlen);
+ out = mlx5_vzalloc(outlen);
+ if (!in || !out)
+ goto out;
- if (out->hdr.status)
- err = mlx5_cmd_status_to_err(&out->hdr);
+ data = MLX5_ADDR_OF(access_register_in, in, register_data);
+ memcpy(data, data_in, size_in);
- if (!err)
- memcpy(data_out, out->data, size_out);
+ MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
+ MLX5_SET(access_register_in, in, op_mod, !write);
+ MLX5_SET(access_register_in, in, argument, arg);
+ MLX5_SET(access_register_in, in, register_id, reg_id);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ if (err)
+ goto out;
+
+ data = MLX5_ADDR_OF(access_register_out, out, register_data);
+ memcpy(data_out, data, size_out);
-ex2:
+out:
kvfree(out);
-ex1:
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
-
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port)
{
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
-
return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
ptys_size, MLX5_REG_PTYS, 0, 0);
}
@@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
{
+ u32 in[MLX5_ST_SZ_DW(mlcr_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
- u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(mlcr_reg, in, local_port, 1);
MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
@@ -182,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port)
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+ u32 *proto_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
- err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
+ local_port);
if (err)
return err;
- if (proto_mask == MLX5_PTYS_EN)
- *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- else
- *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
+
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
+ local_port);
+ if (err)
+ return err;
+
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask)
@@ -246,15 +253,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
{
- u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
@@ -263,19 +267,15 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
{
- u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)];
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(paos_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
-
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
@@ -284,13 +284,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
{
- u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmtu_reg, in, local_port, port);
-
mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 0);
@@ -304,14 +301,11 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
{
- u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
MLX5_SET(pmtu_reg, in, local_port, port);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
@@ -333,15 +327,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
- u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
int module_mapping;
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmlp_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_PMLP, 0, 0);
if (err)
@@ -410,11 +401,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
- u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pvlc_reg, in, local_port, local_port);
-
return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
pvlc_size, MLX5_REG_PVLC, 0, 0);
}
@@ -460,10 +449,9 @@ EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pptx, tx_pause);
MLX5_SET(pfcc_reg, in, pprx, rx_pause);
@@ -476,13 +464,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
@@ -500,10 +486,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
@@ -517,13 +502,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
@@ -567,12 +550,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev)
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
{
- u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
int err;
int i;
- memset(in, 0, sizeof(in));
for (i = 0; i < 8; i++) {
if (prio_tc[i] > mlx5_max_tc(mdev))
return -EINVAL;
@@ -617,11 +599,9 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
int i;
- memset(in, 0, sizeof(in));
-
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
@@ -633,11 +613,9 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
int i;
- memset(in, 0, sizeof(in));
-
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
@@ -651,12 +629,10 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_units)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
void *ets_tcn_conf;
int i;
- memset(in, 0, sizeof(in));
-
MLX5_SET(qetc_reg, in, port_number, 1);
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
@@ -701,35 +677,24 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
- u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
- u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
-
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
-
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
@@ -740,11 +705,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pcmr_reg, in, local_port, 1);
-
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
outlen, MLX5_REG_PCMR, 0, 0);
}
@@ -759,12 +722,10 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
{
- u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pcmr_reg, in, local_port, 1);
MLX5_SET(pcmr_reg, in, fcs_chk, enable);
-
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b82d65802d96..d0a4005fe63a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -271,30 +271,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
- int inlen)
+ u32 *in, int inlen)
{
- struct mlx5_create_qp_mbox_out out;
- struct mlx5_destroy_qp_mbox_in din;
- struct mlx5_destroy_qp_mbox_out dout;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
int err;
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err) {
- mlx5_core_warn(dev, "ret %d\n", err);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (err)
return err;
- }
-
- if (out.hdr.status) {
- mlx5_core_warn(dev, "current num of QPs 0x%x\n",
- atomic_read(&dev->num_qps));
- return mlx5_cmd_status_to_err(&out.hdr);
- }
- qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
@@ -311,12 +301,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
return 0;
err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
- din.qpn = cpu_to_be32(qp->qpn);
- mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
-
+ memset(din, 0, sizeof(din));
+ memset(dout, 0, sizeof(dout));
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
@@ -324,45 +313,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
- struct mlx5_destroy_qp_mbox_in in;
- struct mlx5_destroy_qp_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
int err;
mlx5_debug_qp_remove(dev, qp);
destroy_qprqsq_common(dev, qp);
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
- in.qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
atomic_dec(&dev->num_qps);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+struct mbox_info {
+ u32 *in;
+ u32 *out;
+ int inlen;
+ int outlen;
+};
+
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+ mbox->inlen = inlen;
+ mbox->outlen = outlen;
+ mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+ mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+ if (!mbox->in || !mbox->out) {
+ kfree(mbox->in);
+ kfree(mbox->out);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+ kfree(mbox->in);
+ kfree(mbox->out);
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+ u32 opt_param_mask, void *qpc,
+ struct mbox_info *mbox)
+{
+ mbox->out = NULL;
+ mbox->in = NULL;
+
+#define MBOX_ALLOC(mbox, typ) \
+ mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+
+ switch (opcode) {
+ /* 2RST & 2ERR */
+ case MLX5_CMD_OP_2RST_QP:
+ if (MBOX_ALLOC(mbox, qp_2rst))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ if (MBOX_ALLOC(mbox, qp_2err))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+ break;
+
+ /* MODIFY with QPC */
+ case MLX5_CMD_OP_RST2INIT_QP:
+ if (MBOX_ALLOC(mbox, rst2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (MBOX_ALLOC(mbox, init2rtr_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ if (MBOX_ALLOC(mbox, rtr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (MBOX_ALLOC(mbox, rts2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ if (MBOX_ALLOC(mbox, init2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ default:
+ mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
+ opcode, qpn);
+ return -EINVAL;
+ }
+ return 0;
+}
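
[editor's note] For one concrete transition, MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, opt_param_mask, qpc) is nothing more than the following, written long-hand with the same macros used above:

	MLX5_SET(rst2init_qp_in, mbox->in, opcode, opcode);
	MLX5_SET(rst2init_qp_in, mbox->in, qpn, qpn);
	MLX5_SET(rst2init_qp_in, mbox->in, opt_param_mask, opt_param_mask);
	memcpy(MLX5_ADDR_OF(rst2init_qp_in, mbox->in, qpc), qpc,
	       MLX5_ST_SZ_BYTES(qpc));

Each transition gets its own correctly sized mailbox because the per-opcode input structs differ, even though these four fields are common to all of them.
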
+
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp)
{
- struct mlx5_modify_qp_mbox_out out;
- int err = 0;
+ struct mbox_info mbox;
+ int err;
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(operation);
- in->qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+ opt_param_mask, qpc, &mbox);
if (err)
return err;
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+ mbox_free(&mbox);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
@@ -382,66 +471,38 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
}
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_qp_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, outlen);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
- in.qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
- struct mlx5_alloc_xrcd_mbox_in in;
- struct mlx5_alloc_xrcd_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
- else
- *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
-
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
- struct mlx5_dealloc_xrcd_mbox_in in;
- struct mlx5_dealloc_xrcd_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
- in.xrcdn = cpu_to_be32(xrcdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
@@ -449,28 +510,23 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
u8 flags, int error)
{
- struct mlx5_page_fault_resume_mbox_in in;
- struct mlx5_page_fault_resume_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
- in.hdr.opmod = 0;
- flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
- MLX5_PAGE_FAULT_RESUME_WRITE |
- MLX5_PAGE_FAULT_RESUME_RDMA);
- flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
- in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
- (flags << MLX5_QPN_BITS));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
+
+ MLX5_SET(page_fault_resume_in, in, opcode,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ MLX5_SET(page_fault_resume_in, in, qpn, qpn);
+
+ if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
+ MLX5_SET(page_fault_resume_in, in, req_res, 1);
+ if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
+ MLX5_SET(page_fault_resume_in, in, read_write, 1);
+ if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
+ MLX5_SET(page_fault_resume_in, in, rdma, 1);
+ if (error)
+ MLX5_SET(page_fault_resume_in, in, error, 1);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
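
[editor's note] The rewrite retires the old packed word, where flags sat above a 24-bit QPN, in favor of individually named bits in page_fault_resume_in. A sketch of the legacy packing being removed (MLX5_QPN_BITS assumed to be 24 from the removed masks; values invented):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t qpn = 0x000123, flags = 0x5;
		uint32_t legacy = (qpn & 0xffffff) | (flags << 24);

		printf("legacy word 0x%08x -> qpn 0x%06x flags 0x%x\n",
		       legacy, legacy & 0xffffff, legacy >> 24);
		return 0;
	}
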
@@ -541,15 +597,12 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*counter_id = MLX5_GET(alloc_q_counter_out, out,
counter_set_id);
@@ -559,31 +612,25 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size)
{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
MLX5_SET(query_q_counter_in, in, clear, reset);
MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
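
[editor's note] The three q-counter helpers form an alloc/query/free lifecycle; a hedged call-sequence sketch, not from the patch (kernel context assumed, error handling elided):

	u16 counter_id;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};

	if (!mlx5_core_alloc_q_counter(dev, &counter_id)) {
		mlx5_core_query_q_counter(dev, counter_id, 0, out, sizeof(out));
		mlx5_core_dealloc_q_counter(dev, counter_id);
	}
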
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index c07c28bd3d55..104902a93a0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
MLX5_SET(set_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_RATE_LIMIT);
MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index b380a6bc1f85..e08627785590 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -37,198 +37,200 @@
#include "eswitch.h"
#endif
-static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+ return !!sriov->num_vfs;
+}
+
+static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
int vf;
- for (vf = 1; vf <= num_vfs; vf++) {
- err = mlx5_core_enable_hca(dev, vf);
+ if (sriov->enabled_vfs) {
+ mlx5_core_warn(dev,
+ "failed to enable SRIOV on device, already enabled with %d vfs\n",
+ sriov->enabled_vfs);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+ if (err) {
+ mlx5_core_warn(dev,
+ "failed to enable eswitch SRIOV (%d)\n", err);
+ return err;
+ }
+#endif
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
- mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1);
- } else {
- sriov->vfs_ctx[vf - 1].enabled = 1;
- mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1);
+ mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
+ continue;
}
+ sriov->vfs_ctx[vf].enabled = 1;
+ sriov->enabled_vfs++;
+ mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
+
}
+
+ return 0;
}
-static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+ int err;
int vf;
- for (vf = 1; vf <= num_vfs; vf++) {
- if (sriov->vfs_ctx[vf - 1].enabled) {
- if (mlx5_core_disable_hca(dev, vf))
- mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1);
- else
- sriov->vfs_ctx[vf - 1].enabled = 0;
+ if (!sriov->enabled_vfs)
+ return;
+
+ for (vf = 0; vf < sriov->num_vfs; vf++) {
+ if (!sriov->vfs_ctx[vf].enabled)
+ continue;
+ err = mlx5_core_disable_hca(dev, vf + 1);
+ if (err) {
+ mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
+ continue;
}
+ sriov->vfs_ctx[vf].enabled = 0;
+ sriov->enabled_vfs--;
}
+
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+#endif
+
+ if (mlx5_wait_for_vf_pages(dev))
+ mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
-static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
+static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err;
-
- if (pci_num_vf(pdev))
- pci_disable_sriov(pdev);
+ int err = 0;
- enable_vfs(dev, num_vfs);
-
- err = pci_enable_sriov(pdev, num_vfs);
- if (err) {
- dev_warn(&pdev->dev, "enable sriov failed %d\n", err);
- goto ex;
+ if (pci_num_vf(pdev)) {
+ mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
+ return -EBUSY;
}
- return 0;
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err)
+ mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-ex:
- disable_vfs(dev, num_vfs);
return err;
}
-static int mlx5_core_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
+{
+ pci_disable_sriov(pdev);
+}
+
+static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
+ int err = 0;
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC);
- if (!sriov->vfs_ctx)
- return -ENOMEM;
+ err = mlx5_device_enable_sriov(dev, num_vfs);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
+ return err;
+ }
- sriov->enabled_vfs = num_vfs;
- err = mlx5_core_create_vfs(pdev, num_vfs);
+ err = mlx5_pci_enable_sriov(pdev, num_vfs);
if (err) {
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = NULL;
+ mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+ mlx5_device_disable_sriov(dev);
return err;
}
+ sriov->num_vfs = num_vfs;
+
return 0;
}
-static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_sriov_disable(struct pci_dev *pdev)
{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- sriov->num_vfs = num_vfs;
-}
-
-static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_sriov *sriov;
-
- sriov = &dev->priv.sriov;
- disable_vfs(dev, sriov->num_vfs);
-
- if (mlx5_wait_for_vf_pages(dev))
- mlx5_core_warn(dev, "timeout claiming VFs pages\n");
-
+ mlx5_pci_disable_sriov(pdev);
+ mlx5_device_disable_sriov(dev);
sriov->num_vfs = 0;
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
+ int err = 0;
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
return -EPERM;
- mlx5_core_cleanup_vfs(dev);
-
- if (!num_vfs) {
-#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_disable_sriov(dev->priv.eswitch);
-#endif
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = NULL;
- if (!pci_vfs_assigned(pdev))
- pci_disable_sriov(pdev);
- else
- pr_info("unloading PF driver while leaving orphan VFs\n");
- return 0;
+ if (num_vfs && mlx5_lag_is_active(dev)) {
+ mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+ return -EINVAL;
}
- err = mlx5_core_sriov_enable(pdev, num_vfs);
- if (err) {
- dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
- return err;
- }
+ if (num_vfs)
+ err = mlx5_sriov_enable(pdev, num_vfs);
+ else
+ mlx5_sriov_disable(pdev);
- mlx5_core_init_vfs(dev, num_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
-#endif
-
- return num_vfs;
+ return err ? err : num_vfs;
}
-static int sync_required(struct pci_dev *pdev)
+int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int cur_vfs = pci_num_vf(pdev);
- if (cur_vfs != sriov->num_vfs) {
- pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs);
- return 1;
- }
+ if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+ return 0;
- return 0;
+ /* If SR-IOV VFs exist at the PCI level, enable them at the device level */
+ return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+}
+
+void mlx5_sriov_detach(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_pf(dev))
+ return;
+
+ mlx5_device_disable_sriov(dev);
}
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct pci_dev *pdev = dev->pdev;
- int cur_vfs;
+ int total_vfs;
if (!mlx5_core_is_pf(dev))
return 0;
- if (!sync_required(dev->pdev))
- return 0;
-
- cur_vfs = pci_num_vf(pdev);
- sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ sriov->num_vfs = pci_num_vf(pdev);
+ sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
if (!sriov->vfs_ctx)
return -ENOMEM;
- sriov->enabled_vfs = cur_vfs;
-
- mlx5_core_init_vfs(dev, cur_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
- if (cur_vfs)
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
- SRIOV_LEGACY);
-#endif
-
- enable_vfs(dev, cur_vfs);
-
return 0;
}
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
- int err;
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
if (!mlx5_core_is_pf(dev))
- return 0;
+ return;
- err = mlx5_core_sriov_configure(pdev, 0);
- if (err)
- return err;
-
- return 0;
+ kfree(sriov->vfs_ctx);
}
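
The rework above separates SR-IOV control into a device stage (eswitch plus per-VF HCA enablement) and a PCI stage, torn down in reverse, and adds attach/detach hooks so VFs that already exist in PCI config space can be re-enabled at the device level across a reload. A hedged sketch of where the new hooks would sit in a PF load/unload path; the real call sites live outside this hunk:

static int pf_load(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_sriov_init(dev);	/* records pci_num_vf(), allocs vfs_ctx */
	if (err)
		return err;

	/* VFs may already exist at the PCI level (e.g. after a recovery
	 * reload); attach re-enables them at the device level only.
	 */
	err = mlx5_sriov_attach(dev);
	if (err)
		mlx5_sriov_cleanup(dev);
	return err;
}

static void pf_unload(struct mlx5_core_dev *dev)
{
	mlx5_sriov_detach(dev);		/* device level only; PCI VFs remain */
	mlx5_sriov_cleanup(dev);	/* frees vfs_ctx */
}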
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c07f4d01b70e..3099630015d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
- sizeof(create_out));
+ err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ sizeof(create_out));
kvfree(create_in);
if (!err)
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
@@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_srq_in, srq_in, opcode,
MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out,
- MLX5_ST_SZ_BYTES(query_srq_out));
+ err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
if (err)
goto out;
@@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(create_out, 0, sizeof(create_out));
- err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
- sizeof(create_out));
+ err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ sizeof(create_out));
if (err)
goto out;
@@ -286,36 +285,30 @@ out:
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq, u16 lwm)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
@@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+
+ err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 28274a6fbafe..a00ff49eec18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -36,17 +36,14 @@
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -57,29 +54,23 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
- u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqn = MLX5_GET(create_rq_out, out, rqn);
@@ -95,21 +86,18 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
@@ -121,19 +109,17 @@ int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
- u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*sqn = MLX5_GET(create_sq_out, out, sqn);
@@ -142,27 +128,22 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
@@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
- u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@@ -197,39 +177,32 @@ EXPORT_SYMBOL(mlx5_core_create_tir);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn)
{
- u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@@ -245,34 +218,29 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn)
{
- u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
int err;
MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
@@ -281,38 +249,31 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ return mlx5_cmd_exec(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
- memset(in, 0, sizeof(in));
MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, in, rmpn, rmpn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
@@ -347,13 +308,11 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *xsrqn)
{
- u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
int err;
MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
@@ -362,33 +321,25 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
void *srqc;
void *xrc_srqc;
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
-
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (!err) {
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
xrc_srq_context_entry);
@@ -401,32 +352,25 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
- u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
- u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
MLX5_SET(arm_xrc_srq_in, in, op_mod,
MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
@@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5ff8af472bf5..ab0b896621a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -42,73 +42,28 @@ enum {
NUM_LOW_LAT_UUARS = 4,
};
-
-struct mlx5_alloc_uar_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_uar_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 uarn;
- u8 rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 uarn;
- u8 rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
- struct mlx5_alloc_uar_mbox_in in;
- struct mlx5_alloc_uar_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- goto ex;
-
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
- goto ex;
- }
-
- *uarn = be32_to_cpu(out.uarn) & 0xffffff;
-
-ex:
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
- struct mlx5_free_uar_mbox_in in;
- struct mlx5_free_uar_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
- in.uarn = cpu_to_be32(uarn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- goto ex;
+ u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
-ex:
- return err;
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
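
The removed alloc path extracted the 24-bit UAR number by hand; MLX5_GET(alloc_uar_out, out, uar) performs the same extraction driven by the field's declared offset and width. A self-contained check of the equivalence, assuming the field occupies the low 24 bits of its dword as the removed struct implied:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t wire = htonl(0xab123456);	/* 8 reserved bits + 24-bit uarn */
	uint32_t old_way = ntohl(wire) & 0xffffff;		  /* open-coded */
	uint32_t new_way = (ntohl(wire) >> 0) & ((1u << 24) - 1); /* layout-driven */

	printf("old=%#x new=%#x\n", (unsigned int)old_way, (unsigned int)new_way);
	return 0;	/* both print 0x123456 */
}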
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21365d06982b..525f17af108e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -39,10 +39,7 @@
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
- int err;
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@@ -51,11 +48,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
- if (err)
- mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
-
- return err;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
@@ -81,58 +74,43 @@ EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
{
- u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
- u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
- int err;
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
MLX5_SET(modify_vport_state_in, in, vport_number, vport);
-
if (vport)
MLX5_SET(modify_vport_state_in, in, other_vport, 1);
-
MLX5_SET(modify_vport_state_in, in, admin_state, state);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
-
- return err;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
MLX5_SET(modify_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
@@ -147,6 +125,26 @@ void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_ctx;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.min_inline, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ min_wqe_inline_mode, min_inline);
+
+ return mlx5_modify_nic_vport_context(mdev, in, inlen);
+}
+
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
@@ -254,7 +252,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int *list_size)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
void *nic_vport_ctx;
int max_list_size;
int req_list_size;
@@ -278,7 +276,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -291,7 +288,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@@ -361,7 +358,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
ether_addr_copy(curr_mac, addr_list[i]);
}
- err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@@ -406,7 +403,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@@ -473,7 +470,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
}
- err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@@ -631,10 +628,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto out;
-
tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
gid->global.subnet_prefix = tmp->global.subnet_prefix;
gid->global.interface_id = tmp->global.interface_id;
@@ -700,10 +693,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto out;
-
pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
@@ -721,7 +710,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
- int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
int is_group_manager;
void *out;
void *ctx;
@@ -729,7 +718,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -752,9 +740,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto ex;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto ex;
ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
@@ -969,10 +954,6 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
- if (err)
- goto free;
- err = mlx5_cmd_status_to_err_v2(out);
-
free:
kvfree(in);
return err;
@@ -1035,11 +1016,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- if (err)
- goto ex;
-
- err = mlx5_cmd_status_to_err_v2(out);
-
ex:
kfree(in);
return err;
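
mlx5_modify_nic_vport_min_inline() selects only the min_wqe_inline_mode field via field_select and hard-codes other_vport = 1, so it is intended for a PF configuring another vport (a VF). A hypothetical call; MLX5_INLINE_MODE_L2 is assumed here purely for illustration:

static int force_vf_min_inline(struct mlx5_core_dev *mdev, u16 vport)
{
	int err;

	/* other_vport is set internally, so vport 0 (the PF itself) is
	 * not a valid target here
	 */
	err = mlx5_modify_nic_vport_min_inline(mdev, vport,
					       MLX5_INLINE_MODE_L2);
	if (err)
		mlx5_core_warn(mdev, "min inline on vport %d failed (%d)\n",
			       vport, err);
	return err;
}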
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index e25a73ed2981..07a9ba6cfc70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -46,41 +46,24 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv)
static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- struct mlx5_outbox_hdr *hdr;
- int err;
-
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- hdr = (struct mlx5_outbox_hdr *)out;
- return hdr->status ? -ENOMEM : 0;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
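
One behavioral fix rides along in the add-port conversion: the removed code collapsed every nonzero firmware status into -ENOMEM, whereas mlx5_cmd_exec() now translates the status into a specific errno. A hedged sketch of a same-file caller (hypothetical, since mlx5e_vxlan_core_add_port_cmd() is static) that benefits from the translated error:

static void vxlan_add_port_verbose(struct mlx5_core_dev *mdev, u16 port)
{
	int err = mlx5e_vxlan_core_add_port_cmd(mdev, port);

	if (err)	/* e.g. -EINVAL for a rejected parameter, not -ENOMEM */
		mlx5_core_warn(mdev, "add vxlan port %d failed (%d)\n",
			       port, err);
}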
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 068ee65a960b..aa33d58b9f81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1100,10 +1100,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats;
}
- if (mlxsw_driver->profile->used_max_lag &&
- mlxsw_driver->profile->used_max_port_per_lag) {
- alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
- mlxsw_driver->profile->max_port_per_lag;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
+ if (err)
+ goto err_bus_init;
+
+ if (mlxsw_core->resources.max_lag_valid &&
+ mlxsw_core->resources.max_ports_in_lag_valid) {
+ alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
+ mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1111,11 +1116,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
- if (err)
- goto err_bus_init;
-
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -1146,10 +1146,10 @@ err_hwmon_init:
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
@@ -1615,7 +1615,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
- return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+ return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
@@ -1644,7 +1644,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
- for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+ for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
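
The reordering above is forced by a dependency: the LAG mapping is now sized from resources that only become valid after mlxsw_bus->init() queries them, and the error labels swap so teardown remains the exact reverse of setup. A compact, self-contained rendering of that invariant with illustrative names:

#include <stdlib.h>

struct resources {
	int max_lag_valid;
	int max_lag, max_ports_in_lag;
};

static int bus_init(struct resources *res)
{
	/* stands in for the firmware resource query */
	res->max_lag = 64;
	res->max_ports_in_lag = 32;
	res->max_lag_valid = 1;
	return 0;
}

static void bus_fini(struct resources *res) { (void)res; }

static int core_register(struct resources *res, unsigned char **lag_mapping)
{
	int err = bus_init(res);

	if (err)
		return err;			/* nothing to unwind yet */

	if (res->max_lag_valid) {
		*lag_mapping = calloc(res->max_lag * res->max_ports_in_lag, 1);
		if (!*lag_mapping) {
			err = -1;
			goto err_alloc;		/* bus is up: undo it */
		}
	}
	return 0;

err_alloc:
	bus_fini(res);				/* reverse order of setup */
	return err;
}

int main(void)
{
	struct resources res = { 0 };
	unsigned char *mapping = NULL;

	return core_register(&res, &mapping);
}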
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d3476ead9982..c4f550b6f783 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -87,6 +87,7 @@ struct mlxsw_rx_listener {
void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
u8 local_port;
u16 trap_id;
+ enum mlxsw_reg_hpkt_action action;
};
struct mlxsw_event_listener {
@@ -178,8 +179,6 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
- used_max_lag:1,
- used_max_port_per_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -191,10 +190,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
- used_kvd_sizes:1;
+ used_kvd_split_data:1; /* indicates the KVD split parameters are valid */
+
u8 max_vepa_channels;
- u16 max_lag;
- u16 max_port_per_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -213,8 +211,9 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
- u32 kvd_hash_single_size;
- u32 kvd_hash_double_size;
+ u16 kvd_hash_granularity;
+ u8 kvd_hash_single_parts;
+ u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -268,8 +267,35 @@ struct mlxsw_driver {
};
struct mlxsw_resources {
- u8 max_span_valid:1;
+ u32 max_span_valid:1,
+ max_lag_valid:1,
+ max_ports_in_lag_valid:1,
+ kvd_size_valid:1,
+ kvd_single_min_size_valid:1,
+ kvd_double_min_size_valid:1,
+ max_virtual_routers_valid:1,
+ max_system_ports_valid:1,
+ max_vlan_groups_valid:1,
+ max_regions_valid:1,
+ max_rif_valid:1;
u8 max_span;
+ u8 max_lag;
+ u8 max_ports_in_lag;
+ u32 kvd_size;
+ u32 kvd_single_min_size;
+ u32 kvd_double_min_size;
+ u16 max_virtual_routers;
+ u16 max_system_ports;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u16 max_rif;
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ u32 kvd_single_size;
+ u32 kvd_double_size;
+ u32 kvd_linear_size;
};
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
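
Every value added to mlxsw_resources is paired with a *_valid bit because firmware is not guaranteed to report each resource; consumers guard every use, as the core.c LAG-mapping change above does. A hypothetical consumer, not part of the patch:

static u16 mlxsw_max_rif_or_default(struct mlxsw_core *mlxsw_core,
				    u16 fallback)
{
	struct mlxsw_resources *res = mlxsw_core_resources_get(mlxsw_core);

	/* only trust a queried value when its valid bit is set */
	return res->max_rif_valid ? res->max_rif : fallback;
}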
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1d1360c178bb..e742bd4e8894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1156,6 +1156,16 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_MAX_LAG_ID 0x2520
+#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
+#define MLXSW_KVD_SIZE_ID 0x1001
+#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
+#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
+#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
+#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
+#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
+#define MLXSW_MAX_REGIONS_ID 0x2901
+#define MLXSW_MAX_RIF_ID 0x2C02
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
#define MLXSW_RESOURCES_PER_QUERY 32
@@ -1167,6 +1177,46 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val,
resources->max_span = val;
resources->max_span_valid = 1;
break;
+ case MLXSW_MAX_LAG_ID:
+ resources->max_lag = val;
+ resources->max_lag_valid = 1;
+ break;
+ case MLXSW_MAX_PORTS_IN_LAG_ID:
+ resources->max_ports_in_lag = val;
+ resources->max_ports_in_lag_valid = 1;
+ break;
+ case MLXSW_KVD_SIZE_ID:
+ resources->kvd_size = val;
+ resources->kvd_size_valid = 1;
+ break;
+ case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
+ resources->kvd_single_min_size = val;
+ resources->kvd_single_min_size_valid = 1;
+ break;
+ case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
+ resources->kvd_double_min_size = val;
+ resources->kvd_double_min_size_valid = 1;
+ break;
+ case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
+ resources->max_virtual_routers = val;
+ resources->max_virtual_routers_valid = 1;
+ break;
+ case MLXSW_MAX_SYSTEM_PORT_ID:
+ resources->max_system_ports = val;
+ resources->max_system_ports_valid = 1;
+ break;
+ case MLXSW_MAX_VLAN_GROUPS_ID:
+ resources->max_vlan_groups = val;
+ resources->max_vlan_groups_valid = 1;
+ break;
+ case MLXSW_MAX_REGIONS_ID:
+ resources->max_regions = val;
+ resources->max_regions_valid = 1;
+ break;
+ case MLXSW_MAX_RIF_ID:
+ resources->max_rif = val;
+ resources->max_rif_valid = 1;
+ break;
default:
break;
}
@@ -1209,10 +1259,52 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
+static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
+{
+ u32 singles_size, doubles_size, linear_size;
+
+ if (!resources->kvd_single_min_size_valid ||
+ !resources->kvd_double_min_size_valid ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ linear_size = profile->kvd_linear_size;
+
+ /* The hash part is what is left of the KVD after the
+ * linear part is taken out. It is split into single and
+ * double sizes according to the parts ratio from the
+ * profile. Both sizes must be multiples of the
+ * granularity from the profile.
+ */
+ doubles_size = (resources->kvd_size - linear_size);
+ doubles_size *= profile->kvd_hash_double_parts;
+ doubles_size /= (profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts);
+ doubles_size /= profile->kvd_hash_granularity;
+ doubles_size *= profile->kvd_hash_granularity;
+ singles_size = resources->kvd_size - doubles_size -
+ linear_size;
+
+ /* Check results are legal. */
+ if (singles_size < resources->kvd_single_min_size ||
+ doubles_size < resources->kvd_double_min_size ||
+ resources->kvd_size < linear_size)
+ return -EIO;
+
+ resources->kvd_single_size = singles_size;
+ resources->kvd_double_size = doubles_size;
+ resources->kvd_linear_size = linear_size;
+
+ return 0;
+}
+
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
int i;
+ int err;
mlxsw_cmd_mbox_zero(mbox);
@@ -1222,18 +1314,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
- if (profile->used_max_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_lag_set(
- mbox, profile->max_lag);
- }
- if (profile->used_max_port_per_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
- mbox, profile->max_port_per_lag);
- }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1310,19 +1390,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (profile->used_kvd_sizes) {
- mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
- mbox, profile->kvd_linear_size);
- mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
- mbox, profile->kvd_hash_single_size);
+ if (resources->kvd_size_valid) {
+ err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
+ if (err)
+ return err;
+
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
+ resources->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
+ resources->kvd_single_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
- mbox, profile->kvd_hash_double_size);
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
+ resources->kvd_double_size);
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1524,7 +1607,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
if (err)
goto err_config_profile;
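
The split computed by mlxsw_pci_profile_get_kvd_sizes() is easiest to follow with numbers. The same arithmetic as a standalone program with made-up inputs (the real profile values live in the spectrum driver, outside this hunk):

#include <stdio.h>

int main(void)
{
	unsigned int kvd_size = 240000, linear = 48000;
	unsigned int single_parts = 2, double_parts = 1, granularity = 128;
	unsigned int doubles, singles;

	/* hash space = total minus linear; split by the parts ratio,
	 * then round the double part down to the granularity
	 */
	doubles = (kvd_size - linear) * double_parts /
		  (double_parts + single_parts);
	doubles = doubles / granularity * granularity;
	singles = kvd_size - doubles - linear;

	printf("linear=%u single=%u double=%u\n", linear, singles, doubles);
	/* -> linear=48000 single=128000 double=64000 */
	return 0;
}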
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1721098eef13..6460c7256f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -591,6 +591,12 @@ static const struct mlxsw_reg_info mlxsw_reg_sfn = {
*/
MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+/* reg_sfn_end
+ * Forces the current session to end.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
+
/* reg_sfn_num_rec
* Request: Number of learned notifications and aged-out notification
* records requested.
@@ -605,6 +611,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload)
{
MLXSW_REG_ZERO(sfn, payload);
mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_end_set(payload, 1);
mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
}
@@ -1385,7 +1392,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
- mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
+ mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
}
@@ -2131,6 +2138,18 @@ MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+enum {
+ MLXSW_REG_PTYS_AN_STATUS_NA,
+ MLXSW_REG_PTYS_AN_STATUS_OK,
+ MLXSW_REG_PTYS_AN_STATUS_FAIL,
+};
+
+/* reg_ptys_an_status
+ * Autonegotiation status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+
#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
@@ -2145,6 +2164,7 @@ MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2 BIT(18)
#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
@@ -2177,6 +2197,13 @@ MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+/* reg_ptys_eth_proto_lp_advertise
+ * The protocols that were advertised by the link partner during
+ * autonegotiation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
+
static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
u32 proto_admin)
{
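
The new reg_ptys fields follow the register-item convention used throughout reg.h: MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4) declares a field at payload byte offset 0x04, bit offset 28, width 4, from which accessor helpers are generated. Roughly what the generated getter does -- a simplified sketch, not the real item.h machinery:

static inline u32 sketch_reg_ptys_an_status_get(const char *payload)
{
	__be32 dword;

	memcpy(&dword, payload + 0x04, sizeof(dword));	   /* byte offset 0x04 */
	return (be32_to_cpu(dword) >> 28) & GENMASK(3, 0); /* bit 28, width 4 */
}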
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d48873bcbddf..1ec0a4ce3c46 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -248,7 +248,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int i;
@@ -262,7 +263,8 @@ struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
return NULL;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -364,7 +366,8 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
/* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
if (err)
goto err_mpar_reg_write;
@@ -405,7 +408,8 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
return;
/* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
/* remove the SBIB buffer if it was egress SPAN */
@@ -556,8 +560,9 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, bool learn_enable)
+int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -566,13 +571,20 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
- mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
- learn_enable);
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool learn_enable)
+{
+ return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ learn_enable);
+}
+
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -811,9 +823,9 @@ err_span_port_mtu_update:
return err;
}
-static struct rtnl_link_stats64 *
-mlxsw_sp_port_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static int
+mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_pcpu_stats *p;
@@ -840,6 +852,107 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
tx_dropped += p->tx_dropped;
}
stats->tx_dropped = tx_dropped;
+ return 0;
+}
+
+static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlxsw_sp_port_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+}
+
+static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl);
+ if (err)
+ goto out;
+
+ stats->tx_packets =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ stats->rx_packets =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ stats->tx_bytes =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ stats->rx_bytes =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ stats->multicast =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+
+ stats->rx_crc_errors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ stats->rx_frame_errors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+
+ stats->rx_length_errors = (
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
+
+ stats->rx_errors = (stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_length_errors);
+
+out:
+ return err;
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ container_of(work, struct mlxsw_sp_port,
+ hw_stats.update_dw.work);
+
+ if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ goto out;
+
+ mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
+ mlxsw_sp_port->hw_stats.cache);
+
+out:
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ MLXSW_HW_STATS_UPDATE_TIME);
+}
+
+/* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+
return stats;
}
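The hunk above splits stats collection in two: a software path that sums per-CPU counters (exported through the new ndo_get_offload_stats as CPU-hit traffic) and a hardware path that reads IEEE 802.3 counters from the PPCNT register. Because ndo_get_stats64 may be called in atomic context, the hardware counters are refreshed from a delayed work item and the getter only copies the cache; the work callback recovers its owning port with container_of(). A standalone sketch of that recovery pattern, with made-up struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
	int pending;
};

struct port {
	int id;
	struct work update_work;	/* embedded, like hw_stats.update_dw */
};

static void update_cb(struct work *w)
{
	/* Recover the enclosing port from the embedded work item. */
	struct port *p = container_of(w, struct port, update_work);

	printf("refreshing stats cache for port %d\n", p->id);
}

int main(void)
{
	struct port p = { .id = 7 };

	update_cb(&p.update_work);
	return 0;
}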
@@ -974,10 +1087,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
goto err_port_vp_mode_trans;
}
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err)
- goto err_port_vid_learning_set;
-
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err)
goto err_port_add_vid;
@@ -985,8 +1094,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
return 0;
err_port_add_vid:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-err_port_vid_learning_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
@@ -1013,8 +1120,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-
/* Drop FID reference. If this was the last reference the
* resources will be freed.
*/
@@ -1209,6 +1314,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
+ .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
@@ -1547,8 +1654,6 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1557,10 +1662,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
for (i = 0; i < len; i++)
- data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+ data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -1599,112 +1703,149 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
}
struct mlxsw_sp_port_link_mode {
+ enum ethtool_link_mode_bit_indices mask_ethtool;
u32 mask;
- u32 supported;
- u32 advertised;
u32 speed;
};
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = SPEED_20000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
+ },
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
- .speed = 25000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = SPEED_50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
- .speed = 50000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .speed = 100000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ .speed = SPEED_100000,
},
};
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
-static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+static void
+mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
@@ -1712,43 +1853,29 @@ static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
- return SUPPORTED_Backplane;
- return 0;
-}
-
-static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
-{
- u32 modes = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].supported;
- }
- return modes;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
-static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
- u32 modes = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].advertised;
+ __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ mode);
}
- return modes;
}
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
@@ -1765,8 +1892,8 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
}
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
@@ -1791,49 +1918,15 @@ static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
return PORT_OTHER;
}
-static int mlxsw_sp_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- u32 eth_proto_admin;
- u32 eth_proto_oper;
- int err;
-
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
- return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
-
- cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
- mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause |
- SUPPORTED_Autoneg;
- cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
- mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
- eth_proto_oper, cmd);
-
- eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
- cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
-
- cmd->transceiver = XCVR_INTERNAL;
- return 0;
-}
-
-static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+static u32
+mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
}
return ptys_proto;
@@ -1863,61 +1956,113 @@ static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
return ptys_proto;
}
-static int mlxsw_sp_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *cmd)
{
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+ mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
+ mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
+}
+
+static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!autoneg)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
+}
+
+static void
+mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
+}
+
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 speed;
- u32 eth_proto_new;
- u32 eth_proto_cap;
- u32 eth_proto_admin;
+ u8 autoneg_status;
+ bool autoneg;
int err;
- speed = ethtool_cmd_speed(cmd);
+ autoneg = mlxsw_sp_port->link.autoneg;
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper);
+
+ mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
+
+ mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
+
+ eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
+ autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
+ mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
+
+ cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
+ cmd);
+
+ return 0;
+}
- eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
- mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
- mlxsw_sp_to_ptys_speed(speed);
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap, eth_proto_new;
+ bool autoneg;
+ int err;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
+ if (err)
return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ eth_proto_new = autoneg ?
+ mlxsw_sp_to_ptys_advert_link(cmd) :
+ mlxsw_sp_to_ptys_speed(cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
- netdev_err(dev, "Not supported proto admin requested");
+ netdev_err(dev, "No supported speed requested\n");
return -EINVAL;
}
- if (eth_proto_new == eth_proto_admin)
- return 0;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to set proto admin");
+ if (err)
return err;
- }
if (!netif_running(dev))
return 0;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port->link.autoneg = autoneg;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
return 0;
}
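The converted ethtool code above is table-driven: each entry of mlxsw_sp_port_link_mode now pairs a PTYS register bit with an ethtool link-mode bit index, and both translation directions (mlxsw_sp_from_ptys_link and mlxsw_sp_to_ptys_advert_link) walk the same table. A compilable toy version of the idea, with invented mask values and a single-word bitmap instead of the multi-word ethtool one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct link_mode {
	uint32_t dev_mask;		/* bit in the device register */
	unsigned int ethtool_bit;	/* index in the ethtool bitmap */
	int speed;
};

static const struct link_mode modes[] = {
	{ 1u << 0, 0, 1000 },
	{ 1u << 1, 1, 10000 },
	{ 1u << 2, 2, 25000 },
};

#define MODES_LEN (sizeof(modes) / sizeof(modes[0]))

static unsigned long from_dev(uint32_t proto)
{
	unsigned long bitmap = 0;
	size_t i;

	for (i = 0; i < MODES_LEN; i++)
		if (proto & modes[i].dev_mask)
			bitmap |= 1ul << modes[i].ethtool_bit;
	return bitmap;
}

static uint32_t to_dev(unsigned long bitmap)
{
	uint32_t proto = 0;
	size_t i;

	for (i = 0; i < MODES_LEN; i++)
		if (bitmap & (1ul << modes[i].ethtool_bit))
			proto |= modes[i].dev_mask;
	return proto;
}

int main(void)
{
	unsigned long bm = from_dev((1u << 1) | (1u << 2));

	printf("bitmap 0x%lx, back to dev 0x%x\n", bm, to_dev(bm));
	return 0;
}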
@@ -1931,8 +2076,8 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
.get_sset_count = mlxsw_sp_port_get_sset_count,
- .get_settings = mlxsw_sp_port_get_settings,
- .set_settings = mlxsw_sp_port_set_settings,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
};
static int
@@ -2082,6 +2227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->link.autoneg = 1;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -2103,6 +2249,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->hw_stats.cache =
+ kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
+
+ if (!mlxsw_sp_port->hw_stats.cache) {
+ err = -ENOMEM;
+ goto err_alloc_hw_stats;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ &update_stats_cache);
+
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
@@ -2129,7 +2285,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
*/
- dev->hard_header_len += MLXSW_TXHDR_LEN;
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
if (err) {
@@ -2203,6 +2359,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
return 0;
err_core_port_init:
@@ -2223,6 +2380,8 @@ err_port_system_port_mapping_set:
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
+ kfree(mlxsw_sp_port->hw_stats.cache);
+err_alloc_hw_stats:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2239,6 +2398,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
+ cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -2248,6 +2408,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
+ kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2571,123 +2732,47 @@ static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_func(skb, local_port, priv);
+}
+
+#define MLXSW_SP_RXL(_func, _trap_id, _action) \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .action = MLXSW_REG_HPKT_ACTION_##_action, \
+ }
+
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_FDB_MC,
- },
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU),
/* Traps for specific L2 packet types, not trapped as FDB MC */
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_STP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LACP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_EAPOL,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LLDP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MMRP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MVRP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RPVST,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_DHCP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ARPBC,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ARPUC,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MTUERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_TTLERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LBERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_OSPF,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IP2ME,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
- },
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU),
+ /* L3 traps */
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU),
};
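The MLXSW_SP_RXL() macro above collapses a long list of near-identical listener initializers into one line each, and lets every entry carry its own HPKT action, so control packets such as DHCP, IGMP queries and ARP can be mirrored to the CPU (forwarded in hardware as well) instead of trapped. The same designated-initializer macro trick in miniature, with invented trap names and action values:

#include <stddef.h>
#include <stdio.h>

struct listener {
	const char *trap;
	int action;
};

#define RXL(_trap, _action) { .trap = #_trap, .action = (_action) }

static const struct listener listeners[] = {
	RXL(STP, 1),	/* trapped to CPU */
	RXL(LACP, 1),
	RXL(DHCP, 2),	/* mirrored: CPU copy plus HW forwarding */
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(listeners) / sizeof(listeners[0]); i++)
		printf("%s -> %d\n", listeners[i].trap, listeners[i].action);
	return 0;
}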
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -2714,7 +2799,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rx_listener_register;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action,
mlxsw_sp_rx_listener[i].trap_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
@@ -2802,7 +2887,9 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ int err;
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
@@ -2813,7 +2900,26 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ return -EIO;
+
+ mlxsw_sp->lags = kcalloc(resources->max_lag,
+ sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
}
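The LAG hunks above replace compile-time maximums with limits queried from the device resources at init time: mlxsw_sp_lag_init() now bails out with -EIO when the firmware does not report valid LAG limits, and sizes the lags array to the reported value. A userspace stand-in for the query-then-allocate pattern (query_max_lag() is a placeholder for the resource query):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct upper {
	int ref_count;
};

/* Placeholder for reading the limit from firmware at probe time. */
static int query_max_lag(void)
{
	return 64;
}

int main(void)
{
	int max_lag = query_max_lag();
	struct upper *lags = calloc(max_lag, sizeof(*lags));

	if (!lags)
		return ENOMEM;
	printf("allocated %d LAG slots\n", max_lag);
	free(lags);
	return 0;
}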
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -2897,6 +3003,7 @@ err_span_init:
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
@@ -2910,38 +3017,26 @@ err_rx_listener_register:
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- int i;
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = MLXSW_SP_LAG_MAX,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
.max_pgt = 0,
- .used_max_system_port = 1,
- .max_system_port = 64,
- .used_max_vlan_groups = 1,
- .max_vlan_groups = 127,
- .used_max_regions = 1,
- .max_regions = 400,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -2953,10 +3048,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvd_sizes = 1,
+ .used_kvd_split_data = 1,
+ .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
+ .kvd_hash_single_parts = 2,
+ .kvd_hash_double_parts = 1,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
- .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
- .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -3073,13 +3169,15 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
if (!mlxsw_sp->rifs[i])
return i;
- return MLXSW_SP_RIF_MAX;
+ return MLXSW_SP_INVALID_RIF;
}
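With MLXSW_SP_RIF_MAX gone, mlxsw_sp_avail_rif_get() above scans a dynamically sized table and signals exhaustion with the out-of-band MLXSW_SP_INVALID_RIF sentinel (0xffff, outside any valid RIF index). The sentinel-return search in miniature, with UINT_MAX standing in for the sentinel:

#include <limits.h>
#include <stdio.h>

static unsigned int find_free(void *table[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (!table[i])
			return i;
	return UINT_MAX;	/* plays the role of MLXSW_SP_INVALID_RIF */
}

int main(void)
{
	void *rifs[4] = { (void *)1, NULL, NULL, NULL };

	printf("free slot: %u\n", find_free(rifs, 4));
	return 0;
}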
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
@@ -3159,7 +3257,7 @@ mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
@@ -3391,7 +3489,7 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
@@ -3598,12 +3696,14 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_resources *resources;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3809,11 +3909,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
int i;
- for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3847,9 +3949,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ac48abebe904..9b22863a924b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -45,7 +45,7 @@
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
-#include <net/switchdev.h>
+#include <linux/notifier.h>
#include "port.h"
#include "core.h"
@@ -54,10 +54,7 @@
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_RIF_MAX 800
-
-#define MLXSW_SP_LAG_MAX 64
-#define MLXSW_SP_PORT_PER_LAG_MAX 16
+#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -67,8 +64,6 @@
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -77,8 +72,7 @@
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
-#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
-#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+#define MLXSW_SP_KVD_GRANULARITY 128
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
@@ -253,7 +247,7 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
- struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct {
struct delayed_work dw;
@@ -263,6 +257,7 @@ struct mlxsw_sp_router {
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
struct list_head nexthop_group_list;
struct list_head nexthop_neighs_list;
+ bool aborted;
};
struct mlxsw_sp {
@@ -275,7 +270,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
+ struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -290,7 +285,7 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
- struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ struct mlxsw_sp_upper *lags;
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
@@ -302,6 +297,7 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -341,7 +337,8 @@ struct mlxsw_sp_port {
} vport;
struct {
u8 tx_pause:1,
- rx_pause:1;
+ rx_pause:1,
+ autoneg:1;
} link;
struct {
struct ieee_ets *ets;
@@ -360,6 +357,11 @@ struct mlxsw_sp_port {
struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
+ struct {
+ #define MLXSW_HW_STATS_UPDATE_TIME HZ
+ struct rtnl_link_stats64 *cache;
+ struct delayed_work update_dw;
+ } hw_stats;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
@@ -477,9 +479,12 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+
+ for (i = 0; i < resources->max_rif; i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
@@ -558,6 +563,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
+int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable);
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -578,11 +586,6 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 953b214f38d0..bcaed8a38037 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -595,9 +595,9 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
- pool_info->pool_type = dir;
+ pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
- pool_info->threshold_type = pr->mode;
+ pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -608,9 +608,10 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- enum mlxsw_reg_sbpr_mode mode = threshold_type;
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+ enum mlxsw_reg_sbpr_mode mode;
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
@@ -696,13 +697,13 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
cm->max_buff);
- *p_pool_index = pool_index_get(cm->pool, pool_type);
+ *p_pool_index = pool_index_get(cm->pool, dir);
return 0;
}
@@ -716,7 +717,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
@@ -943,7 +944,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
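The spectrum_buffers.c changes above add explicit casts between parallel enum spaces (devlink pool/threshold types vs. the SBXX/SBPR register encodings) whose numeric values are expected to line up; the cast documents that assumption and silences enum-conversion warnings. A minimal illustration with made-up enums:

#include <stdio.h>

enum api_dir { API_INGRESS, API_EGRESS };
enum reg_dir { REG_INGRESS, REG_EGRESS };	/* values match by design */

int main(void)
{
	enum api_dir a = API_EGRESS;
	enum reg_dir r = (enum reg_dir) a;	/* cast documents the mapping */

	printf("%d\n", r == REG_EGRESS);
	return 0;
}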
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3f5c51da6d3e..78fc557d6dd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -43,6 +43,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/ip_fib.h>
#include "spectrum.h"
#include "core.h"
@@ -122,17 +123,20 @@ struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
+ struct list_head list;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
+ struct fib_info *fi;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
};
struct mlxsw_sp_fib {
struct rhashtable ht;
+ struct list_head entry_list;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -154,6 +158,7 @@ static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
mlxsw_sp_fib_ht_params);
if (err)
return err;
+ list_add_tail(&fib_entry->list, &fib->entry_list);
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
return 0;
@@ -166,6 +171,7 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ list_del(&fib_entry->list);
rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
mlxsw_sp_fib_ht_params);
}
@@ -216,6 +222,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&fib->entry_list);
return fib;
err_rhashtable_init:
@@ -252,7 +259,9 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -261,7 +270,9 @@ static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -368,10 +379,12 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -384,7 +397,9 @@ static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto,
+ vr->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -394,7 +409,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0, which is the default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -410,11 +426,14 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -548,15 +567,33 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
&vr->fib->prefix_usage);
}
-static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_virtual_routers_valid)
+ return -EIO;
+
+ mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
+ sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.vrs)
+ return -ENOMEM;
+
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
@@ -1081,9 +1118,10 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
{
char raleu_pl[MLXSW_REG_RALEU_LEN];
- mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
- adj_index, ecmp_size,
- new_adj_index, new_ecmp_size);
+ mlxsw_reg_raleu_pack(raleu_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
+ adj_index, ecmp_size, new_adj_index,
+ new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1489,50 +1527,6 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
- err = __mlxsw_sp_router_init(mlxsw_sp);
- if (err)
- return err;
- mlxsw_sp_lpm_init(mlxsw_sp);
- mlxsw_sp_vrs_init(mlxsw_sp);
- err = mlxsw_sp_neigh_init(mlxsw_sp);
- if (err)
- goto err_neigh_init;
- return 0;
-
-err_neigh_init:
- __mlxsw_sp_router_fini(mlxsw_sp);
- return err;
-}
-
-void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_neigh_fini(mlxsw_sp);
- __mlxsw_sp_router_fini(mlxsw_sp);
-}
-
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -1558,8 +1552,9 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,8 +1568,9 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl,
MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
fib_entry->rif);
@@ -1589,8 +1585,9 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1637,94 +1634,102 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
-struct mlxsw_sp_router_fib4_add_info {
- struct switchdev_trans_item tritem;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_fib_entry *fib_entry;
-};
-
-static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
-{
- const struct mlxsw_sp_router_fib4_add_info *info = data;
- struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
- struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
-
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
- kfree(info);
-}
-
static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4,
+ const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
+ struct mlxsw_sp_rif *r = NULL;
+ int nhsel;
+ int err;
- if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
}
- if (fib4->type != RTN_UNICAST)
+ if (fen_info->type != RTN_UNICAST)
return -EINVAL;
- if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
- struct mlxsw_sp_rif *r;
+ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+ const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+ if (!nh->nh_dev)
+ continue;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
+ if (!r) {
+			/* If no router interface is found for at least
+			 * one of the nexthops, that nexthop points to a
+			 * device unrelated to us. Set a trap and pass
+			 * the packets for this prefix to the kernel.
+			 */
+ break;
+ }
+ }
+ if (!r) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
- if (!r)
- return -EINVAL;
fib_entry->rif = r->rif;
- return 0;
+ } else {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ if (err)
+ return err;
}
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
}
static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
- return;
- mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
+ fib_info_offload_dec(fib_entry->fi);
+ if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return ERR_CAST(vr);
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (fib_entry) {
/* Already exists, just take a reference */
fib_entry->ref_count++;
return fib_entry;
}
- fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
+ fib_entry->fi = fi;
fib_entry->ref_count = 1;
- err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
if (err)
goto err_fib4_entry_init;
@@ -1740,21 +1745,23 @@ err_fib_entry_create:
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
if (!vr)
return NULL;
- return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len,
- fib4->fi->fib_dev);
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len,
+ fen_info->fi->fib_dev);
}
-void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_vr *vr = fib_entry->vr;
@@ -1765,60 +1772,43 @@ void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
- struct mlxsw_sp_fib_entry *fib_entry;
- int err;
+ unsigned int last_ref_count;
- fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
- if (IS_ERR(fib_entry))
- return PTR_ERR(fib_entry);
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto err_alloc_info;
- }
- info->mlxsw_sp = mlxsw_sp;
- info->fib_entry = fib_entry;
- switchdev_trans_item_enqueue(trans, info,
- mlxsw_sp_router_fib4_add_info_destroy,
- &info->tritem);
- return 0;
-
-err_alloc_info:
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return err;
+ do {
+ last_ref_count = fib_entry->ref_count;
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ } while (last_ref_count != 1);
}
-static int
-mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
- info = switchdev_trans_item_dequeue(trans);
- fib_entry = info->fib_entry;
- kfree(info);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
+ if (IS_ERR(fib_entry)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
+ return PTR_ERR(fib_entry);
+ }
if (fib_entry->ref_count != 1)
return 0;
vr = fib_entry->vr;
err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
- if (err)
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
goto err_fib_entry_insert;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ }
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@@ -1830,24 +1820,15 @@ err_fib_entry_insert:
return err;
}
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- if (switchdev_trans_ph_prepare(trans))
- return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
- fib4, trans);
- return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
- fib4, trans);
-}
-
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
- fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
@@ -1861,3 +1842,172 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
+
+static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ int err;
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_entry *tmp;
+ struct mlxsw_sp_vr *vr;
+ int i;
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ continue;
+
+ list_for_each_entry_safe(fib_entry, tmp,
+ &vr->fib->entry_list, list) {
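+ /* The final put may free the VR and this list with it, so latch
+ * the loop-exit condition before the references are dropped. */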
+ bool do_break = &tmp->list == &vr->fib->entry_list;
+
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
+ fib_entry);
+ mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
+ if (do_break)
+ break;
+ }
+ }
+ mlxsw_sp->router.aborted = true;
+ err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_rif_valid)
+ return -EIO;
+
+ mlxsw_sp->rifs = kcalloc(resources->max_rif,
+ sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
+ if (err)
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ }
+ return NOTIFY_DONE;
+}
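
This handler is the FIB-notifier hookup called out in the merge summary: an
add that cannot be reflected into hardware, or any FIB rule change, flips the
router into abort mode instead of returning an error the FIB core could not
act on anyway. A stripped-down userspace model of the dispatch shape
(hand-rolled types; the real code registers a struct notifier_block via
register_fib_notifier(), and its abort path has no idempotence guard, that is
added here only to keep the demo tidy):

    #include <stdbool.h>
    #include <stdio.h>

    enum fib_event { FIB_ENTRY_ADD, FIB_ENTRY_DEL, FIB_RULE_ADD, FIB_RULE_DEL };

    struct router {
            bool aborted; /* once set, add/del become no-ops */
    };

    /* Abort: flush offloaded routes and trap traffic to the CPU, so
     * forwarding stays correct, just no longer accelerated. */
    static void router_abort(struct router *r)
    {
            if (r->aborted)
                    return;
            r->aborted = true;
            printf("abort: routes flushed, trap installed\n");
    }

    static int route_add(struct router *r)
    {
            if (r->aborted)
                    return 0; /* mirrors the early return above */
            printf("route offloaded\n");
            return 0;
    }

    static void fib_event(struct router *r, enum fib_event ev)
    {
            switch (ev) {
            case FIB_ENTRY_ADD:
                    if (route_add(r))
                            router_abort(r); /* cannot offload: give up */
                    break;
            case FIB_ENTRY_DEL:
                    break;
            case FIB_RULE_ADD:
            case FIB_RULE_DEL:
                    router_abort(r); /* policy routing is not offloadable */
                    break;
            }
    }

    int main(void)
    {
            struct router r = { false };

            fib_event(&r, FIB_ENTRY_ADD); /* offloads */
            fib_event(&r, FIB_RULE_ADD);  /* aborts */
            fib_event(&r, FIB_ENTRY_ADD); /* accepted but not offloaded */
            return 0;
    }
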
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ err = mlxsw_sp_vrs_init(mlxsw_sp);
+ if (err)
+ goto err_vrs_init;
+
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+
+ mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ register_fib_notifier(&mlxsw_sp->fib_nb);
+ return 0;
+
+err_neigh_init:
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+err_vrs_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
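
Two details of mlxsw_sp_router_init()/_fini() are worth keeping in mind: the
notifier is registered as the very last init step and unregistered as the
very first fini step, so no FIB event can ever observe a half-built router,
and the error path unwinds in strict reverse order via the usual goto ladder.
The same shape in a self-contained sketch (all helper names hypothetical):

    #include <stdio.h>

    static int  vrs_init(void)            { return 0; }
    static void vrs_fini(void)            { }
    static int  neigh_init(void)          { return 0; }
    static void neigh_fini(void)          { }
    static void notifier_register(void)   { puts("events may fire now"); }
    static void notifier_unregister(void) { puts("events quiesced"); }

    static int router_init(void)
    {
            int err;

            err = vrs_init();
            if (err)
                    return err;
            err = neigh_init();
            if (err)
                    goto err_neigh_init;
            notifier_register(); /* last: everything it needs now exists */
            return 0;

    err_neigh_init:
            vrs_fini(); /* unwind in strict reverse order */
            return err;
    }

    static void router_fini(void)
    {
            notifier_unregister(); /* first: stop new events */
            neigh_fini();
            vrs_fini();
    }

    int main(void)
    {
            if (!router_init())
                    router_fini();
            return 0;
    }
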
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b654c517b91..5e00c79e8133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -254,12 +254,40 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
+static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ u16 vid;
+ int err;
+
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+ return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ set);
+ }
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ set);
+ if (err)
+ goto err_port_vid_learning_set;
+ }
+
+ return 0;
+
+err_port_vid_learning_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
+ return err;
+}
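
mlxsw_sp_port_learning_set() applies the learning flag per active VLAN and,
if any VLAN fails midway, simply re-walks the whole bitmap with the inverse
setting rather than tracking how far it got. A compilable model of that
apply-then-rollback shape (a bool array stands in for the kernel bitmap and
for_each_set_bit(); all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define N_VIDS 8

    static bool active[N_VIDS] = { false, true, true, false, true };

    static int hw_learning_set(int vid, bool set)
    {
            if (set && vid == 4)
                    return -1; /* simulate a hardware failure */
            printf("vid %d: learning=%d\n", vid, set);
            return 0;
    }

    static int port_learning_set(bool set)
    {
            int vid, err;

            for (vid = 0; vid < N_VIDS; vid++) {
                    if (!active[vid])
                            continue;
                    err = hw_learning_set(vid, set);
                    if (err)
                            goto rollback;
            }
            return 0;

    rollback:
            /* Like the kernel code, undo by re-walking every active
             * VLAN with the inverse setting. */
            for (vid = 0; vid < N_VIDS; vid++)
                    if (active[vid])
                            hw_learning_set(vid, !set);
            return err;
    }

    int main(void)
    {
            return port_learning_set(true) ? 1 : 0;
    }
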
+
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
unsigned long brport_flags)
{
+ unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
- bool set;
int err;
if (!mlxsw_sp_port->bridged)
@@ -269,17 +297,30 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- set = mlxsw_sp_port->uc_flood ? false : true;
- err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
+ !mlxsw_sp_port->uc_flood);
if (err)
return err;
}
+ if ((learning ^ brport_flags) & BR_LEARNING) {
+ err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
+ !mlxsw_sp_port->learning);
+ if (err)
+ goto err_port_learning_set;
+ }
+
mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
return 0;
+
+err_port_learning_set:
+ if ((uc_flood ^ brport_flags) & BR_FLOOD)
+ mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
+ mlxsw_sp_port->uc_flood);
+ return err;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -631,6 +672,27 @@ static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ u16 vid, vid_e;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ vid_e, learn_enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
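
A single SPVMLR register write carries at most
MLXSW_REG_SPVMLR_REC_MAX_COUNT VID records, so the helper above walks
[vid_begin, vid_end] in register-sized chunks. The chunking arithmetic in
isolation (REC_MAX is a stand-in constant):

    #include <stdio.h>

    #define REC_MAX 16 /* stand-in for MLXSW_REG_SPVMLR_REC_MAX_COUNT */

    int main(void)
    {
            unsigned int vid_begin = 1, vid_end = 40, vid, vid_e;

            for (vid = vid_begin; vid <= vid_end; vid += REC_MAX) {
                    vid_e = vid + REC_MAX - 1;
                    if (vid_e > vid_end)
                            vid_e = vid_end;
                    /* one register write covers [vid, vid_e] */
                    printf("program VIDs %u-%u\n", vid, vid_e);
            }
            /* prints 1-16, 17-32, 33-40 */
            return 0;
    }
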
+
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
@@ -671,6 +733,14 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ mlxsw_sp_port->learning);
+ if (err) {
+ netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
+ vid_begin, vid_end);
+ goto err_port_vid_learning_set;
+ }
+
 /* Change activity bits only if the HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++) {
set_bit(vid, mlxsw_sp_port->active_vlans);
@@ -693,6 +763,9 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ false);
+err_port_vid_learning_set:
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
@@ -971,11 +1044,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -997,29 +1065,20 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end)
{
- struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
- int err;
if (!mlxsw_sp_port->bridged)
return -EINVAL;
- err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
- if (err) {
- netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
- vid_end);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ false);
pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- if (err) {
- netdev_err(dev, "Unable to del PVID %d\n", pvid);
- return err;
- }
- }
+ if (pvid >= vid_begin && pvid <= vid_end)
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
+
+ __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
+ false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
@@ -1117,10 +1176,6 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1141,9 +1196,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
@@ -1362,8 +1419,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
- adding = adding && mlxsw_sp_port->learning;
-
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1425,8 +1480,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
- adding = adding && mlxsw_sp_port->learning;
-
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1492,20 +1545,18 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
rtnl_lock();
- do {
- mlxsw_reg_sfn_pack(sfn_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
- if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
- break;
- }
- num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
- for (i = 0; i < num_rec; i++)
- mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ goto out;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
- } while (num_rec);
+out:
rtnl_unlock();
-
kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
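
The fdb_notify_work() rework above trades the unbounded drain loop for a
single SFN query per work invocation: under a steady flood of FDB events the
old do/while could hold rtnl_lock for a long time, while the new shape always
drops the lock and lets the rescheduled work pick up the rest. A userspace
model of the single-pass shape (rtnl_lock()/rtnl_unlock() and the
delayed-work reschedule are the kernel primitives being imitated):

    #include <stdio.h>

    static int batches = 3; /* pretend the device keeps producing records */

    static int query_num_rec(void)
    {
            return batches-- > 0 ? 4 : 0;
    }

    /* One batch per invocation: "lock", process, "unlock", reschedule.
     * The old loop kept the lock for as long as records kept arriving. */
    static void fdb_notify_work(void)
    {
            int i, num_rec;

            /* rtnl_lock() here in the kernel */
            num_rec = query_num_rec();
            for (i = 0; i < num_rec; i++)
                    printf("process record %d\n", i);
            /* rtnl_unlock() + schedule_delayed_work() here in the kernel */
    }

    int main(void)
    {
            fdb_notify_work(); /* handles one batch */
            fdb_notify_work(); /* the next invocation handles the next */
            return 0;
    }
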
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 377daa4d509c..c0c23e2f3275 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -997,7 +997,7 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
 /* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
*/
- dev->hard_header_len += MLXSW_TXHDR_LEN;
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
if (err) {
@@ -1512,10 +1512,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = 64,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = 16,
.used_max_mid = 1,
.max_mid = 7000,
.used_max_pgt = 1,
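
The switchx2 hunk above is the hard_header_len misuse called out at the top
of this merge: the Tx metadata the driver prepends is headroom, not part of
the link-layer header, so it belongs in needed_headroom, while
hard_header_len must describe only the real L2 header. The distinction in a
small sketch (hypothetical struct and constant names):

    #include <stdio.h>

    #define ETH_HLEN  14 /* plain Ethernet header */
    #define TXHDR_LEN 16 /* stand-in for MLXSW_TXHDR_LEN */

    struct netdev_model {
            unsigned short hard_header_len; /* size of the real L2 header */
            unsigned short needed_headroom; /* extra bytes for the driver */
    };

    static void port_setup(struct netdev_model *dev)
    {
            dev->hard_header_len = ETH_HLEN;
            /* Wrong (the old code): hard_header_len += TXHDR_LEN made the
             * stack believe the link-layer header itself was larger. */
            dev->needed_headroom = TXHDR_LEN;
    }

    int main(void)
    {
            struct netdev_model dev;

            port_setup(&dev);
            printf("header %u, headroom %u\n",
                   dev.hard_header_len, dev.needed_headroom);
            return 0;
    }
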
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index eb807b0dc72a..569ade6cf85c 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -134,7 +134,7 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
/* tunables */
#define RX_BUF_SIZE 1500 /* 8192 */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define NS83820_VLAN_ACCEL_SUPPORT
#endif
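
IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> evaluates to 1 for both
built-in (=y) and modular (=m) options, replacing the noisy
defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) pair, and unlike a bare
#ifdef it also works inside ordinary C expressions. A simplified userspace
reimplementation of the placeholder trick behind it (the real macro
additionally checks the CONFIG_FOO_MODULE spelling):

    #include <stdio.h>

    /* kconfig defines CONFIG_FOO to 1 for =y; pasting that 1 onto
     * __ARG_PLACEHOLDER_ injects an extra macro argument, which
     * __take_second_arg() then selects: defined -> 1, undefined -> 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define CONFIG_VLAN_8021Q 1 /* pretend the option is built in */

    int main(void)
    {
            if (IS_ENABLED(CONFIG_VLAN_8021Q))
                    puts("enabled");
            if (!IS_ENABLED(CONFIG_NOT_SET))
                    puts("unset options read as 0");
            return 0;
    }
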
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 68178819ff12..0efb2ba9a558 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -3,6 +3,13 @@ obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o
nfp_netvf-objs := \
nfp_net_common.o \
nfp_net_ethtool.o \
+ nfp_net_offload.o \
nfp_netvf_main.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+nfp_netvf-objs += \
+ nfp_bpf_verifier.o \
+ nfp_bpf_jit.o
+endif
+
nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
new file mode 100644
index 000000000000..22484b6fd3e8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_ASM_H__
+#define __NFP_ASM_H__ 1
+
+#include "nfp_bpf.h"
+
+#define REG_NONE 0
+
+#define RE_REG_NO_DST 0x020
+#define RE_REG_IMM 0x020
+#define RE_REG_IMM_encode(x) \
+ (RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
+#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_XFR 0x080
+
+#define UR_REG_XFR 0x180
+#define UR_REG_NN 0x280
+#define UR_REG_NO_DST 0x300
+#define UR_REG_IMM UR_REG_NO_DST
+#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
+#define UR_REG_IMM_MAX 0x0ffULL
+
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
+
+#define nfp_is_br(_insn) \
+ (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
+
+enum br_mask {
+ BR_BEQ = 0x00,
+ BR_BNE = 0x01,
+ BR_BHS = 0x04,
+ BR_BLO = 0x05,
+ BR_BGE = 0x08,
+ BR_UNC = 0x18,
+};
+
+enum br_ev_pip {
+ BR_EV_PIP_UNCOND = 0,
+ BR_EV_PIP_COND = 1,
+};
+
+enum br_ctx_signal_state {
+ BR_CSS_NONE = 2,
+};
+
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+
+enum immed_width {
+ IMMED_WIDTH_ALL = 0,
+ IMMED_WIDTH_BYTE = 1,
+ IMMED_WIDTH_WORD = 2,
+};
+
+enum immed_shift {
+ IMMED_SHIFT_0B = 0,
+ IMMED_SHIFT_1B = 1,
+ IMMED_SHIFT_2B = 2,
+};
+
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+
+enum shf_op {
+ SHF_OP_NONE = 0,
+ SHF_OP_AND = 2,
+ SHF_OP_OR = 5,
+};
+
+enum shf_sc {
+ SHF_SC_R_ROT = 0,
+ SHF_SC_R_SHF = 1,
+ SHF_SC_L_SHF = 2,
+ SHF_SC_R_DSHF = 3,
+};
+
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+
+enum alu_op {
+ ALU_OP_NONE = 0x00,
+ ALU_OP_ADD = 0x01,
+ ALU_OP_NEG = 0x04,
+ ALU_OP_AND = 0x08,
+ ALU_OP_SUB_C = 0x0d,
+ ALU_OP_ADD_C = 0x11,
+ ALU_OP_OR = 0x14,
+ ALU_OP_SUB = 0x15,
+ ALU_OP_XOR = 0x18,
+};
+
+enum alu_dst_ab {
+ ALU_DST_A = 0,
+ ALU_DST_B = 1,
+};
+
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
+
+struct cmd_tgt_act {
+ u8 token;
+ u8 tgt_cmd;
+};
+
+enum cmd_tgt_map {
+ CMD_TGT_READ8,
+ CMD_TGT_WRITE8,
+ CMD_TGT_READ_LE,
+ CMD_TGT_READ_SWAP_LE,
+ __CMD_TGT_MAP_SIZE,
+};
+
+enum cmd_mode {
+ CMD_MODE_40b_AB = 0,
+ CMD_MODE_40b_BA = 1,
+ CMD_MODE_32b = 4,
+};
+
+enum cmd_ctx_swap {
+ CMD_CTX_SWAP = 0,
+ CMD_CTX_NO_SWAP = 3,
+};
+
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+
+enum lcsr_wr_src {
+ LCSR_WR_AREG,
+ LCSR_WR_BREG,
+ LCSR_WR_IMM,
+};
+
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+#endif
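
Each value in this header is a field mask within the NFP's 64-bit instruction
word; the JIT added later in this patch assembles an instruction by
FIELD_PREP()-ing every field into its mask and OR-ing in the base opcode. A
userspace check of the branch encoding, with a local field_prep() standing in
for the <linux/bitfield.h> helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout copied from nfp_asm.h above. */
    #define OP_BR_BASE    0x0d800000020ULL
    #define OP_BR_MASK    0x0000000001fULL
    #define OP_BR_ADDR_LO 0x007ffc00000ULL

    /* Minimal FIELD_PREP(): mask & -mask isolates the mask's lowest set
     * bit, so the multiply shifts val up to the field position. */
    static uint64_t field_prep(uint64_t mask, uint64_t val)
    {
            return (val * (mask & -mask)) & mask;
    }

    int main(void)
    {
            /* Unconditional branch (BR_UNC == 0x18) to address 0x10. */
            uint64_t insn = OP_BR_BASE |
                            field_prep(OP_BR_MASK, 0x18) |
                            field_prep(OP_BR_ADDR_LO, 0x10);

            printf("insn = %011llx\n", (unsigned long long)insn);
            return 0;
    }
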
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
new file mode 100644
index 000000000000..87aa8a3e9112
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_BPF_H__
+#define __NFP_BPF_H__ 1
+
+#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
+
+/* The branch fixup logic uses the top-most byte of the branch instruction as
+ * a scratch area. Remember to clear it before sending instructions to HW!
+ */
+#define OP_BR_SPECIAL 0xff00000000000000ULL
+
+enum br_special {
+ OP_BR_NORMAL = 0,
+ OP_BR_GO_OUT,
+ OP_BR_GO_ABORT,
+};
+
+enum static_regs {
+ STATIC_REG_PKT = 1,
+#define REG_PKT_BANK ALU_DST_A
+ STATIC_REG_IMM = 2, /* Bank AB */
+};
+
+enum nfp_bpf_action_type {
+ NN_ACT_TC_DROP,
+ NN_ACT_TC_REDIR,
+ NN_ACT_DIRECT,
+};
+
+/* Software register representation, hardware encoding in asm.h */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+};
+
+#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
+
+#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
+#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
+#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
+#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
+#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
+#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
+#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+
+#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
+#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+
+#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAG_MARK 1
+#define NFP_BPF_ABI_MARK reg_nnr(1)
+#define NFP_BPF_ABI_PKT reg_nnr(2)
+#define NFP_BPF_ABI_LEN reg_nnr(3)
+
+struct nfp_prog;
+struct nfp_insn_meta;
+typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
+
+#define nfp_prog_first_meta(nfp_prog) \
+ list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_prog_last_meta(nfp_prog) \
+ list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_meta_next(meta) list_next_entry(meta, l)
+#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+
+/**
+ * struct nfp_insn_meta - BPF instruction wrapper
+ * @insn: BPF instruction
+ * @off: index of first generated machine instruction (in nfp_prog.prog)
+ * @n: eBPF instruction number
+ * @skip: skip this instruction (optimized out)
+ * @double_cb: callback for second part of the instruction
+ * @l: link on nfp_prog->insns list
+ */
+struct nfp_insn_meta {
+ struct bpf_insn insn;
+ unsigned int off;
+ unsigned short n;
+ bool skip;
+ instr_cb_t double_cb;
+
+ struct list_head l;
+};
+
+#define BPF_SIZE_MASK 0x18
+
+static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
+{
+ return BPF_CLASS(meta->insn.code);
+}
+
+static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
+{
+ return BPF_SRC(meta->insn.code);
+}
+
+static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
+{
+ return BPF_OP(meta->insn.code);
+}
+
+static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
+{
+ return BPF_MODE(meta->insn.code);
+}
+
+/**
+ * struct nfp_prog - nfp BPF program
+ * @prog: machine code
+ * @prog_len: number of valid instructions in @prog array
+ * @__prog_alloc_len: alloc size of @prog array
+ * @act: BPF program/action type (TC DA, TC with action, XDP etc.)
+ * @num_regs: number of registers used by this program
+ * @regs_per_thread: number of basic registers allocated per thread
+ * @start_off: address of the first instruction in memory
+ * @tgt_out: jump target for normal exit
+ * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
+ * @tgt_done: jump target to get the next packet
+ * @n_translated: number of successfully translated instructions (for errors)
+ * @error: error code if something went wrong
+ * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
+ */
+struct nfp_prog {
+ u64 *prog;
+ unsigned int prog_len;
+ unsigned int __prog_alloc_len;
+
+ enum nfp_bpf_action_type act;
+
+ unsigned int num_regs;
+ unsigned int regs_per_thread;
+
+ unsigned int start_off;
+ unsigned int tgt_out;
+ unsigned int tgt_abort;
+ unsigned int tgt_done;
+
+ unsigned int n_translated;
+ int error;
+
+ struct list_head insns;
+};
+
+struct nfp_bpf_result {
+ unsigned int n_instr;
+ bool dense_mode;
+};
+
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res);
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+
+#endif
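
The software registers above are plain u32 cookies: the register number sits
in NN_REG_VAL (bits 7:0) and a one-hot type in NN_REG_TYPE (bits 31:24),
which is how reg_both() can name the A and B banks at once. Round-tripping
that encoding in userspace (local masks standing in for
FIELD_PREP()/FIELD_GET()):

    #include <stdint.h>
    #include <stdio.h>

    #define NN_REG_VAL_MASK  0x000000ffu /* bits 7:0   */
    #define NN_REG_TYPE_SHF  24          /* bits 31:24 */
    #define NN_REG_GPR_A     (1u << 0)
    #define NN_REG_GPR_B     (1u << 1)
    #define NN_REG_GPR_BOTH  (NN_REG_GPR_A | NN_REG_GPR_B)

    static uint32_t reg_both(uint32_t x)
    {
            return x | (NN_REG_GPR_BOTH << NN_REG_TYPE_SHF);
    }

    int main(void)
    {
            uint32_t swreg = reg_both(7);
            uint32_t val   = swreg & NN_REG_VAL_MASK;
            uint32_t type  = swreg >> NN_REG_TYPE_SHF;

            /* A one-hot type is cheap to test with AND. */
            printf("reg %u, bank A:%d bank B:%d\n", val,
                   !!(type & NN_REG_GPR_A), !!(type & NN_REG_GPR_B));
            return 0;
    }
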
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
new file mode 100644
index 000000000000..f8df5300f49c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -0,0 +1,1813 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/pkt_cls.h>
+#include <linux/unistd.h>
+
+#include "nfp_asm.h"
+#include "nfp_bpf.h"
+
+/* --- NFP prog --- */
+/* The for-each macros walking "multiple" entries provide pos and next<n> pointers.
+ * It's safe to modify the next pointers (but not pos).
+ */
+#define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos))
+
+#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l), \
+ next2 = list_next_entry(next, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l && \
+ &(nfp_prog)->insns != &next2->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos), \
+ next2 = nfp_meta_next(next))
+
+static bool
+nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.next != &nfp_prog->insns;
+}
+
+static bool
+nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.prev != &nfp_prog->insns;
+}
+
+static void nfp_prog_free(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *tmp;
+
+ list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
+ list_del(&meta->l);
+ kfree(meta);
+ }
+ kfree(nfp_prog);
+}
+
+static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
+{
+ if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
+ nfp_prog->error = -ENOSPC;
+ return;
+ }
+
+ nfp_prog->prog[nfp_prog->prog_len] = insn;
+ nfp_prog->prog_len++;
+}
+
+static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
+{
+ return nfp_prog->start_off + nfp_prog->prog_len;
+}
+
+static unsigned int
+nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+{
+ return offset - nfp_prog->start_off;
+}
+
+/* --- SW reg --- */
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+};
+
+static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding %08x\n", swreg);
+ return 0;
+ }
+}
+
+static int
+swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ return 0;
+}
+
+static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+ }
+}
+
+static int
+swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
+ bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ return 0;
+}
+
+/* --- Emitters --- */
+static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8] = { 0x00, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static void
+__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+{
+ enum cmd_ctx_swap ctx;
+ u64 insn;
+
+ if (sync)
+ ctx = CMD_CTX_SWAP;
+ else
+ ctx = CMD_CTX_NO_SWAP;
+
+ insn = FIELD_PREP(OP_CMD_A_SRC, areg) |
+ FIELD_PREP(OP_CMD_CTX, ctx) |
+ FIELD_PREP(OP_CMD_B_SRC, breg) |
+ FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
+ FIELD_PREP(OP_CMD_XFER, xfer) |
+ FIELD_PREP(OP_CMD_CNT, size) |
+ FIELD_PREP(OP_CMD_SIG, sync) |
+ FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_MODE, mode);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+ if (reg.swap) {
+ pr_err("cmd can't swap arguments\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+}
+
+static void
+__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
+ enum br_ctx_signal_state css, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BR_BASE |
+ FIELD_PREP(OP_BR_MASK, mask) |
+ FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
+ FIELD_PREP(OP_BR_CSS, css) |
+ FIELD_PREP(OP_BR_DEFBR, defer) |
+ FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+{
+ if (defer > 2) {
+ pr_err("BUG: branch defer out of bounds %d\n", defer);
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+}
+
+static void
+emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
+{
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+}
+
+static void
+__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
+ u8 byte, bool equal, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BBYTE_BASE |
+ FIELD_PREP(OP_BB_A_SRC, areg) |
+ FIELD_PREP(OP_BB_BYTE, byte) |
+ FIELD_PREP(OP_BB_B_SRC, breg) |
+ FIELD_PREP(OP_BB_I8, imm8) |
+ FIELD_PREP(OP_BB_EQ, equal) |
+ FIELD_PREP(OP_BB_DEFBR, defer) |
+ FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_br_byte_neq(struct nfp_prog *nfp_prog,
+ u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
+ defer);
+}
+
+static void
+__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+ enum immed_width width, bool invert,
+ enum immed_shift shift, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_IMMED_BASE |
+ FIELD_PREP(OP_IMMED_A_SRC, areg) |
+ FIELD_PREP(OP_IMMED_B_SRC, breg) |
+ FIELD_PREP(OP_IMMED_IMM, imm_hi) |
+ FIELD_PREP(OP_IMMED_WIDTH, width) |
+ FIELD_PREP(OP_IMMED_INV, invert) |
+ FIELD_PREP(OP_IMMED_SHIFT, shift) |
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+ enum immed_width width, bool invert, enum immed_shift shift)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
+ invert, shift, reg.wr_both);
+}
+
+static void
+__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ enum shf_sc sc, u8 shift,
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+{
+ u64 insn;
+
+ if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ if (sc == SHF_SC_L_SHF)
+ shift = 32 - shift;
+
+ insn = OP_SHF_BASE |
+ FIELD_PREP(OP_SHF_A_SRC, areg) |
+ FIELD_PREP(OP_SHF_SC, sc) |
+ FIELD_PREP(OP_SHF_B_SRC, breg) |
+ FIELD_PREP(OP_SHF_I8, i8) |
+ FIELD_PREP(OP_SHF_SW, sw) |
+ FIELD_PREP(OP_SHF_DST, dst) |
+ FIELD_PREP(OP_SHF_SHIFT, shift) |
+ FIELD_PREP(OP_SHF_OP, op) |
+ FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
+ FIELD_PREP(OP_SHF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
+ enum shf_sc sc, u8 shift)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_ALU_BASE |
+ FIELD_PREP(OP_ALU_A_SRC, areg) |
+ FIELD_PREP(OP_ALU_B_SRC, breg) |
+ FIELD_PREP(OP_ALU_DST, dst) |
+ FIELD_PREP(OP_ALU_SW, swap) |
+ FIELD_PREP(OP_ALU_OP, op) |
+ FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
+ FIELD_PREP(OP_ALU_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
+ u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
+ bool zero, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_LDF_BASE |
+ FIELD_PREP(OP_LDF_A_SRC, areg) |
+ FIELD_PREP(OP_LDF_SC, sc) |
+ FIELD_PREP(OP_LDF_B_SRC, breg) |
+ FIELD_PREP(OP_LDF_I8, imm8) |
+ FIELD_PREP(OP_LDF_SW, swap) |
+ FIELD_PREP(OP_LDF_ZF, zero) |
+ FIELD_PREP(OP_LDF_BMASK, bmask) |
+ FIELD_PREP(OP_LDF_SHF, shift) |
+ FIELD_PREP(OP_LDF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
+ u32 dst, u8 bmask, u32 src, bool zero)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
+ reg.i8, zero, reg.swap, reg.wr_both);
+}
+
+static void
+emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+ enum shf_sc sc, u8 shift)
+{
+ emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+}
+
+/* --- Wrappers --- */
+static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
+{
+ if (!(imm & 0xffff0000)) {
+ *val = imm;
+ *shift = IMMED_SHIFT_0B;
+ } else if (!(imm & 0xff0000ff)) {
+ *val = imm >> 8;
+ *shift = IMMED_SHIFT_1B;
+ } else if (!(imm & 0x0000ffff)) {
+ *val = imm >> 16;
+ *shift = IMMED_SHIFT_2B;
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+{
+ enum immed_shift shift;
+ u16 val;
+
+ if (pack_immed(imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
+ } else if (pack_immed(~imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
+ } else {
+ emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
+ false, IMMED_SHIFT_0B);
+ emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
+ false, IMMED_SHIFT_2B);
+ }
+}
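
pack_immed() accepts any 32-bit constant whose significant bits fit one of
three byte-aligned 16-bit windows, and wrp_immed() additionally tries the
bitwise complement (the IMMED instruction can invert) before falling back to
a two-instruction load. Which constants take which path, checked in plain C:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors pack_immed(): 16 significant bits at byte shift 0, 1 or 2. */
    static int packs(uint32_t imm)
    {
            return !(imm & 0xffff0000) || !(imm & 0xff0000ff) ||
                   !(imm & 0x0000ffff);
    }

    int main(void)
    {
            printf("%d\n", packs(0x0000beef)); /* 1: IMMED_SHIFT_0B */
            printf("%d\n", packs(0x00beef00)); /* 1: IMMED_SHIFT_1B */
            printf("%d\n", packs(0xbeef0000)); /* 1: IMMED_SHIFT_2B */
            printf("%d\n", packs(0x12345678)); /* 0: needs two IMMEDs */
            /* 0 here, but packs(~0xffff4110) == 1, so wrp_immed() still
             * emits a single inverted IMMED for this constant. */
            printf("%d\n", packs(0xffff4110));
            return 0;
    }
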
+
+/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
+ * If @imm is small enough, encode it directly in the operand and return it;
+ * otherwise load @imm into a spare register and return that register's
+ * encoding.
+ */
+static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(UR_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+/* re_load_imm_any() - encode immediate or use tmp register (restricted)
+ * If @imm is small enough, encode it directly in the operand and return it;
+ * otherwise load @imm into a spare register and return that register's
+ * encoding.
+ */
+static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(RE_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+static void
+wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
+ enum br_special special)
+{
+ emit_br(nfp_prog, mask, 0, 0);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_BR_SPECIAL, special);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+}
+
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
+ u16 src, bool src_valid, u8 size)
+{
+ unsigned int i;
+ u16 shift, sz;
+ u32 tmp_reg;
+
+ /* We load the value from the address indicated in @offset and then
+ * shift out the data we don't need. Note: this is big endian!
+ */
+ sz = size < 4 ? 4 : size;
+ shift = size < 4 ? 4 - size : 0;
+
+ if (src_valid) {
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog),
+ reg_a(src), ALU_OP_ADD, tmp_reg);
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
+ } else {
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
+ imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
+ }
+
+ i = 0;
+ if (shift)
+ emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ reg_xfer(0), SHF_SC_R_SHF, shift * 8);
+ else
+ for (; i * 4 < size; i++)
+ emit_alu(nfp_prog, reg_both(i),
+ reg_none(), ALU_OP_NONE, reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ return 0;
+}
+
+static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+{
+ return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+}
+
+static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+{
+ emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
+ reg_none(), ALU_OP_NONE, reg_b(src));
+ emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
+ NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+
+ return 0;
+}
+
+static void
+wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
+{
+ u32 tmp_reg;
+
+ if (alu_op == ALU_OP_AND) {
+ if (!imm)
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_OR) {
+ if (!~imm)
+ wrp_immed(nfp_prog, reg_both(dst), ~0U);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_XOR) {
+ if (!~imm)
+ emit_alu(nfp_prog, reg_both(dst), reg_none(),
+ ALU_OP_NEG, reg_b(dst));
+ if (!imm || !~imm)
+ return;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
+}
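
wrp_alu_imm() peels off the algebraic identities before emitting anything:
AND/OR/XOR with 0 or ~0 either disappear entirely, collapse to loading a
constant, or, for XOR with ~0, become the bitwise invert the code emits as
ALU_OP_NEG. The identities themselves, verified in plain C:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t x = 0xdeadbeef;

            assert((x & ~0u) == x);   /* AND ~0: skip entirely  */
            assert((x & 0u) == 0u);   /* AND 0: just load 0     */
            assert((x | 0u) == x);    /* OR 0: skip entirely    */
            assert((x | ~0u) == ~0u); /* OR ~0: just load ~0    */
            assert((x ^ 0u) == x);    /* XOR 0: skip entirely   */
            assert((x ^ ~0u) == ~x);  /* XOR ~0: bitwise invert */
            return 0;
    }
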
+
+static int
+wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
+
+ return 0;
+}
+
+static int
+wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ emit_alu(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), alu_op, reg_b(src + 1));
+
+ return 0;
+}
+
+static int
+wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int
+wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static void
+wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
+ enum br_mask br_mask, u16 off)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
+ emit_br(nfp_prog, br_mask, off, 0);
+}
+
+static int
+wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, enum br_mask br_mask)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
+ insn->src_reg * 2, br_mask, insn->off);
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
+ insn->src_reg * 2 + 1, br_mask, insn->off);
+
+ return 0;
+}
+
+static int
+wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u8 reg = insn->dst_reg * 2;
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(),
+ tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+static int
+wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (swap) {
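+ /* classic XOR swap: exchanges areg and breg without a temporary */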
+ areg ^= breg;
+ breg ^= areg;
+ areg ^= breg;
+ }
+
+ emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+/* --- Callbacks --- */
+static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
+
+ return 0;
+}
+
+static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
+
+ return 0;
+}
+
+static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
+}
+
+static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_ADD,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
+
+ return 0;
+}
+
+static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_SUB,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
+
+ return 0;
+}
+
+static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+
+ return 0;
+}
+
+static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+}
+
+static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
+}
+
+static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+}
+
+static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
+}
+
+static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+}
+
+static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (!insn->imm)
+ return 1; /* TODO: zero shift means indirect */
+
+ emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
+ SHF_SC_L_SHF, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
+ meta->insn.imm);
+
+ return 0;
+}
+
+static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ meta->double_cb = imm_ld8_part2;
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+
+ return 0;
+}
+
+static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+}
+
+static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+}
+
+static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+}
+
+static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 1);
+}
+
+static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 2);
+}
+
+static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 4);
+}
+
+static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, len))
+ emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
+ else
+ return -ENOTSUPP;
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, mark))
+ return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+
+ return -ENOTSUPP;
+}
+
+static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off < 0) /* TODO */
+ return -ENOTSUPP;
+ emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
+
+ return 0;
+}
+
+static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ or1 = imm_a(nfp_prog);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ or2 = imm_b(nfp_prog);
+ }
+
+ emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
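
The 64-bit equality test is synthesized from 32-bit ALU ops: XOR each register half against the matching immediate word, OR the two results, and branch when the OR is zero. Halves whose immediate word is zero skip their XOR, since the register half is then already the comparison residue:

    /* dst == imm  <=>  ((dst_lo ^ imm_lo) | (dst_hi ^ imm_hi)) == 0 */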
+
+static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ meta->skip = true;
+ return 0;
+ }
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ return 0;
+}
+
+static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
+ ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ return 0;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ return 0;
+}
+
+static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
+ emit_alu(nfp_prog, reg_none(),
+ imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
+}
+
+static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
+}
+
+static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+
+ return 0;
+}
+
+static const instr_cb_t instr_cb[256] = {
+ [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
+ [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
+ [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
+ [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
+ [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
+ [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
+ [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
+ [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
+ [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
+ [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
+ [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
+ [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
+ [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
+ [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
+ [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
+ [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
+ [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
+ [BPF_ALU | BPF_AND | BPF_X] = and_reg,
+ [BPF_ALU | BPF_AND | BPF_K] = and_imm,
+ [BPF_ALU | BPF_OR | BPF_X] = or_reg,
+ [BPF_ALU | BPF_OR | BPF_K] = or_imm,
+ [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
+ [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
+ [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
+ [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
+ [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
+ [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
+ [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
+ [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
+ [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
+ [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_JMP | BPF_JA | BPF_K] = jump,
+ [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
+ [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
+ [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
+ [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
+ [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
+ [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_EXIT] = goto_out,
+};
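
Dispatch is a plain array index on the 8-bit opcode; unsupported instructions are NULL entries, which nfp_translate() below turns into -ENOENT. A worked example using the opcode values from uapi/linux/bpf.h:

    /* BPF_ALU64 | BPF_MOV | BPF_X == 0x07 | 0xb0 | 0x08 == 0xbf,
     * so a "r3 = r2" instruction lands in instr_cb[0xbf] == mov_reg64.
     */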
+
+/* --- Misc code --- */
+static void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+/* --- Assembler logic --- */
+static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *next;
+ u32 off, br_idx;
+ u32 idx;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ if (meta->skip)
+ continue;
+ if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+ continue;
+
+ br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (!nfp_is_br(nfp_prog->prog[br_idx])) {
+ pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
+ br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
+ return -ELOOP;
+ }
+ /* Leave special branches for later */
+ if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ continue;
+
+ /* Find the target offset in assembler realm */
+ off = meta->insn.off;
+ if (!off) {
+ pr_err("Fixup found zero offset!!\n");
+ return -ELOOP;
+ }
+
+ while (off && nfp_meta_has_next(nfp_prog, next)) {
+ next = nfp_meta_next(next);
+ off--;
+ }
+ if (off) {
+ pr_err("Fixup found too large jump!! %d\n", off);
+ return -ELOOP;
+ }
+
+ if (next->skip) {
+ pr_err("Branch landing on removed instruction!!\n");
+ return -ELOOP;
+ }
+
+ for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
+ idx <= br_idx; idx++) {
+ if (!nfp_is_br(nfp_prog->prog[idx]))
+ continue;
+ br_set_offset(&nfp_prog->prog[idx], next->off);
+ }
+ }
+
+ /* Fixup 'goto out's separately, they can be scattered around */
+ for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
+ enum br_special special;
+
+ if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
+ continue;
+
+ special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
+ switch (special) {
+ case OP_BR_NORMAL:
+ break;
+ case OP_BR_GO_OUT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_out);
+ break;
+ case OP_BR_GO_ABORT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_abort);
+ break;
+ }
+
+ nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
+ }
+
+ return 0;
+}
+
+static void nfp_intro(struct nfp_prog *nfp_prog)
+{
+ emit_alu(nfp_prog, pkt_reg(nfp_prog),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+}
+
+static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
+{
+ const u8 act2code[] = {
+ [NN_ACT_TC_DROP] = 0x22,
+ [NN_ACT_TC_REDIR] = 0x24
+ };
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+ /* Legacy TC mode:
+ *    0         0x11 -> pass,  count as stat0
+ *   -1  drop   0x22 -> drop,  count as stat1
+ *       redir  0x24 -> redir, count as stat1
+ *  ife mark    0x21 -> pass,  count as stat1
+ *  ife + tx    0x24 -> redir, count as stat1
+ */
+ emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
+ SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
+{
+ /* TC direct-action mode:
+ *   0,1  ok           NOT SUPPORTED[1]
+ *   2    drop    0x22 -> drop,  count as stat1
+ *   4,5  nuke    0x02 -> drop
+ *   7    redir   0x44 -> redir, count as stat2
+ *   *    unspec  0x11 -> pass,  count as stat0
+ *
+ * [1] We can't support OK and RECLASSIFY because we can't tell TC
+ * the exact decision made. We are forced to support UNSPEC
+ * to handle aborts so that's the only one we handle for passing
+ * packets up the stack.
+ */
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+
+ /* if R0 > 7 jump to abort */
+ emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
+ emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+
+ wrp_immed(nfp_prog, reg_b(2), 0x41221211);
+ wrp_immed(nfp_prog, reg_b(3), 0x41001211);
+
+ emit_shf(nfp_prog, reg_a(1),
+ reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_a(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_b(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_shf(nfp_prog, reg_b(2),
+ reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
+}
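
The two immediates act as a 16-nibble lookup table indexed by the TC return code in R0 (R0 << 2 becomes the bit offset driving the indirect shifts): 0x41221211 appears to supply the low nibble of the result byte and 0x41001211 the high nibble. Checking that reading against the table in the comment above, nibbles counted from bit 0:

    /* R0 == 2 (drop):  nibble 2 of 0x41221211 == 2, of 0x41001211 == 2
     *                   -> result byte 0x22, drop + stat1
     * R0 == 4 (nuke):  nibbles 2 and 0 -> 0x02, plain drop
     * R0 == 7 (redir): nibbles 4 and 4 -> 0x44, redir + stat2
     */

Codes above 7 never reach the table; they were already bounced to tgt_abort by the preceding compare-and-branch.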
+
+static void nfp_outro(struct nfp_prog *nfp_prog)
+{
+ switch (nfp_prog->act) {
+ case NN_ACT_DIRECT:
+ nfp_outro_tc_da(nfp_prog);
+ break;
+ case NN_ACT_TC_DROP:
+ case NN_ACT_TC_REDIR:
+ nfp_outro_tc_legacy(nfp_prog);
+ break;
+ }
+}
+
+static int nfp_translate(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+ int err;
+
+ nfp_intro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ instr_cb_t cb = instr_cb[meta->insn.code];
+
+ meta->off = nfp_prog_current_offset(nfp_prog);
+
+ if (meta->skip) {
+ nfp_prog->n_translated++;
+ continue;
+ }
+
+ if (nfp_meta_has_prev(nfp_prog, meta) &&
+ nfp_meta_prev(meta)->double_cb)
+ cb = nfp_meta_prev(meta)->double_cb;
+ if (!cb)
+ return -ENOENT;
+ err = cb(nfp_prog, meta);
+ if (err)
+ return err;
+
+ nfp_prog->n_translated++;
+ }
+
+ nfp_outro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ return nfp_fixup_branches(nfp_prog);
+}
+
+static int
+nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
+ unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct nfp_insn_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->insn = prog[i];
+ meta->n = i;
+
+ list_add_tail(&meta->l, &nfp_prog->insns);
+ }
+
+ return 0;
+}
+
+/* --- Optimizations --- */
+static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ /* Programs converted from cBPF start with register xoring */
+ if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
+ insn.src_reg == insn.dst_reg)
+ continue;
+
+ /* Programs start with R6 = R1 but we ignore the skb pointer */
+ if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn.src_reg == 1 && insn.dst_reg == 6)
+ meta->skip = true;
+
+ /* Return as soon as something doesn't match */
+ if (!meta->skip)
+ return;
+ }
+}
+
+/* Try to rename registers so that program uses only low ones */
+static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
+{
+ bool reg_used[MAX_BPF_REG] = {};
+ u8 tgt_reg[MAX_BPF_REG] = {};
+ struct nfp_insn_meta *meta;
+ unsigned int i, j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (meta->skip)
+ continue;
+
+ reg_used[meta->insn.src_reg] = true;
+ reg_used[meta->insn.dst_reg] = true;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
+ if (!reg_used[i])
+ continue;
+
+ tgt_reg[i] = j++;
+ }
+ nfp_prog->num_regs = j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
+ meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
+ }
+
+ return 0;
+}
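
The rename compacts whatever BPF registers the program actually touches down to the lowest indices, so num_regs measures real register pressure; nfp_bpf_jit() later uses it to pick the per-thread register file size. Effect on a hypothetical program:

    /* program touches only r0, r1 and r6:
     *   tgt_reg: r0 -> 0, r1 -> 1, r6 -> 2, so num_regs == 3
     * 3 <= 7, hence regs_per_thread == 16 and dense_mode is reported
     */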
+
+/* Remove masking after load since our load guarantees this is not needed */
+static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2;
+ const s32 exp_mask[] = {
+ [BPF_B] = 0x000000ffU,
+ [BPF_H] = 0x0000ffffU,
+ [BPF_W] = 0xffffffffU,
+ };
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn insn, next;
+
+ insn = meta1->insn;
+ next = meta2->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+
+ if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
+ continue;
+
+ if (!exp_mask[BPF_SIZE(insn.code)])
+ continue;
+ if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
+ continue;
+
+ if (next.src_reg || next.dst_reg)
+ continue;
+
+ meta2->skip = true;
+ }
+}
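
The peephole targets the zero-extension idiom compilers emit after narrow packet loads: an absolute or indirect load (which writes the implicit r0) followed by an AND against the load-width mask. Roughly, in BPF pseudo-assembly:

    r0 = *(u8 *)skb[14];   /* BPF_LD | BPF_ABS | BPF_B, already zero-extended */
    r0 &= 0xff;            /* BPF_ALU64 | BPF_AND | BPF_K with imm ==
                            * exp_mask[BPF_B] -> meta2->skip, nothing emitted */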
+
+static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2, *meta3;
+
+ nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
+ struct bpf_insn insn, next1, next2;
+
+ insn = meta1->insn;
+ next1 = meta2->insn;
+ next2 = meta3->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+ if (BPF_SIZE(insn.code) != BPF_W)
+ continue;
+
+ if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
+ !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
+ continue;
+
+ if (next1.src_reg || next1.dst_reg ||
+ next2.src_reg || next2.dst_reg)
+ continue;
+
+ if (next1.imm != 0x20 || next2.imm != 0x20)
+ continue;
+
+ meta2->skip = true;
+ meta3->skip = true;
+ }
+}
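
Same idea for word-sized loads: the shift-left-32/shift-right-32 pair (in either order) that clears the high word is redundant because the NFP load sequence already leaves the upper 32 bits zero:

    r0 = *(u32 *)skb[0];   /* BPF_LD | BPF_ABS | BPF_W */
    r0 <<= 32;             /* both shifts have imm == 0x20 and operate */
    r0 >>= 32;             /* on the implicit r0; skipped as a pair    */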
+
+static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
+{
+ int ret;
+
+ nfp_bpf_opt_reg_init(nfp_prog);
+
+ ret = nfp_bpf_opt_reg_rename(nfp_prog);
+ if (ret)
+ return ret;
+
+ nfp_bpf_opt_ld_mask(nfp_prog);
+ nfp_bpf_opt_ld_shift(nfp_prog);
+
+ return 0;
+}
+
+/**
+ * nfp_bpf_jit() - translate BPF code into NFP assembly
+ * @filter: kernel BPF filter struct
+ * @prog_mem: memory to store assembler instructions
+ * @act: action attached to this eBPF program
+ * @prog_start: offset of the first instruction when loaded
+ * @prog_done: where to jump on exit
+ * @prog_sz: size of @prog_mem in instructions
+ * @res: resulting translation parameters (length, register mode)
+ */
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
+ enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res)
+{
+ struct nfp_prog *nfp_prog;
+ int ret;
+
+ nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
+ if (!nfp_prog)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&nfp_prog->insns);
+ nfp_prog->act = act;
+ nfp_prog->start_off = prog_start;
+ nfp_prog->tgt_done = prog_done;
+
+ ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
+ if (ret)
+ goto out;
+
+ ret = nfp_prog_verify(nfp_prog, filter);
+ if (ret)
+ goto out;
+
+ ret = nfp_bpf_optimize(nfp_prog);
+ if (ret)
+ goto out;
+
+ if (nfp_prog->num_regs <= 7)
+ nfp_prog->regs_per_thread = 16;
+ else
+ nfp_prog->regs_per_thread = 32;
+
+ nfp_prog->prog = prog_mem;
+ nfp_prog->__prog_alloc_len = prog_sz;
+
+ ret = nfp_translate(nfp_prog);
+ if (ret) {
+ pr_err("Translation failed with error %d (translated: %u)\n",
+ ret, nfp_prog->n_translated);
+ ret = -EINVAL;
+ }
+
+ res->n_instr = nfp_prog->prog_len;
+ res->dense_mode = nfp_prog->num_regs <= 7;
+out:
+ nfp_prog_free(nfp_prog);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
new file mode 100644
index 000000000000..144cae87f63a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
+#include <linux/pkt_cls.h>
+
+#include "nfp_bpf.h"
+
+/* Analyzer/verifier definitions */
+struct nfp_bpf_analyzer_priv {
+ struct nfp_prog *prog;
+ struct nfp_insn_meta *meta;
+};
+
+static struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns)
+{
+ unsigned int forward, backward, i;
+
+ backward = meta->n - insn_idx;
+ forward = insn_idx - meta->n;
+
+ if (min(forward, backward) > n_insns - insn_idx - 1) {
+ backward = n_insns - insn_idx - 1;
+ meta = nfp_prog_last_meta(nfp_prog);
+ }
+ if (min(forward, backward) > insn_idx && backward > insn_idx) {
+ forward = insn_idx;
+ meta = nfp_prog_first_meta(nfp_prog);
+ }
+
+ if (forward < backward)
+ for (i = 0; i < forward; i++)
+ meta = nfp_meta_next(meta);
+ else
+ for (i = 0; i < backward; i++)
+ meta = nfp_meta_prev(meta);
+
+ return meta;
+}
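
Instruction metadata lives on a linked list, so the random access pattern of verification means walking; the helper starts from whichever of the cached position, the list head or the list tail is closest (the unsigned subtractions deliberately wrap to huge values in the wrong direction, losing the comparisons). For example, with n_insns == 100, a cached meta->n == 10 and a request for insn_idx == 12, forward == 2 wins and only two nfp_meta_next() steps are taken.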
+
+static int
+nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+
+ if (reg0->type != CONST_IMM) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act != NN_ACT_DIRECT &&
+ reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm <= TC_ACT_REDIRECT &&
+ reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN &&
+ reg0->imm != TC_ACT_QUEUED) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env, u8 reg)
+{
+ if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+{
+ struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
+ struct nfp_insn_meta *meta = priv->meta;
+
+ meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
+ priv->meta = meta;
+
+ if (meta->insn.src_reg == BPF_REG_10 ||
+ meta->insn.dst_reg == BPF_REG_10) {
+ pr_err("stack not yet supported\n");
+ return -EINVAL;
+ }
+ if (meta->insn.src_reg >= MAX_BPF_REG ||
+ meta->insn.dst_reg >= MAX_BPF_REG) {
+ pr_err("program uses extended registers - jit hardening?\n");
+ return -EINVAL;
+ }
+
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+ return nfp_bpf_check_exit(priv->prog, env);
+
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.src_reg);
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.dst_reg);
+
+ return 0;
+}
+
+static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+ .insn_hook = nfp_verify_insn,
+};
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
+{
+ struct nfp_bpf_analyzer_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->prog = nfp_prog;
+ priv->meta = nfp_prog_first_meta(nfp_prog);
+
+ ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
+
+ kfree(priv);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 690635660195..ed824e11a1e3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -62,6 +62,9 @@
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
+/* Interval for reading offloaded filter stats */
+#define NFP_NET_STAT_POLL_IVL msecs_to_jiffies(100)
+
/* Bar allocation */
#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
@@ -220,7 +223,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_SPARE cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -266,6 +269,8 @@ struct nfp_net_rx_desc {
};
};
+#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
@@ -405,6 +410,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
fw_ver->minor == minor;
}
+struct nfp_stat_pair {
+ u64 pkts;
+ u64 bytes;
+};
+
/**
* struct nfp_net - NFP network device structure
* @pdev: Backpointer to PCI device
@@ -413,6 +423,7 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @is_vf: Is the driver attached to a VF?
* @is_nfp3200: Is the driver for a NFP-3200 card?
* @fw_loaded: Is the firmware loaded?
+ * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -427,6 +438,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
+ * @rx_filter: Filter offload statistics - dropped packets/bytes
+ * @rx_filter_prev: Filter offload statistics - values from previous update
+ * @rx_filter_change: Jiffies when statistics last changed
+ * @rx_filter_stats_timer: Timer for polling filter offload statistics
+ * @rx_filter_lock: Lock protecting timer state changes (teardown)
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings
@@ -473,6 +489,7 @@ struct nfp_net {
unsigned is_vf:1;
unsigned is_nfp3200:1;
unsigned fw_loaded:1;
+ unsigned bpf_offload_skip_sw:1;
u32 ctrl;
u32 fl_bufsz;
@@ -502,6 +519,11 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
+ struct nfp_stat_pair rx_filter, rx_filter_prev;
+ unsigned long rx_filter_change;
+ struct timer_list rx_filter_stats_timer;
+ spinlock_t rx_filter_lock;
+
int max_tx_rings;
int max_rx_rings;
@@ -561,12 +583,28 @@ struct nfp_net {
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
+static inline u8 nn_readb(struct nfp_net *nn, int off)
+{
+ return readb(nn->ctrl_bar + off);
+}
+
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
writeb(val, nn->ctrl_bar + off);
}
-/* NFP-3200 can't handle 16-bit accesses too well - hence no readw/writew */
+/* NFP-3200 can't handle 16-bit accesses too well */
+static inline u16 nn_readw(struct nfp_net *nn, int off)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ return readw(nn->ctrl_bar + off);
+}
+
+static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ writew(val, nn->ctrl_bar + off);
+}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
@@ -757,4 +795,9 @@ static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
}
#endif /* CONFIG_NFP_NET_DEBUG */
+void nfp_net_filter_stats_timer(unsigned long data);
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf);
+
#endif /* _NFP_NET_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 39dadfca84ef..aee3fd2b6538 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -60,6 +60,7 @@
#include <linux/ktime.h>
+#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include "nfp_net_ctrl.h"
@@ -1292,38 +1293,72 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
}
}
-/**
- * nfp_net_set_hash() - Set SKB hash data
- * @netdev: adapter's net_device structure
- * @skb: SKB to set the hash data on
- * @rxd: RX descriptor
- *
- * The RSS hash and hash-type are pre-pended to the packet data.
- * Extract and decode it and set the skb fields.
- */
static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+ unsigned int type, __be32 *hash)
{
- struct nfp_net_rx_hash *rx_hash;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) ||
- !(netdev->features & NETIF_F_RXHASH))
+ if (!(netdev->features & NETIF_F_RXHASH))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
- switch (be32_to_cpu(rx_hash->hash_type)) {
+ switch (type) {
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
break;
default:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
break;
}
}
+static void
+nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
+ struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
+
+ nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
+static void *
+nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
+ int meta_len)
+{
+ u8 *data = skb->data - meta_len;
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_net_set_hash(netdev, skb,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ skb->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ default:
+ return NULL;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data;
+}
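
The prepend is effectively a small TLV: the leading big-endian word packs up to eight 4-bit field types consumed low-nibble-first, and each field, including the extra hash-type nibble that follows NFP_NET_META_HASH, accounts for four bytes of metadata. A worked decode, with a hypothetical hash-type value:

    /* meta_len == 12, leading word == 0x00000221:
     *   nibble 0 == 1 (NFP_NET_META_HASH) -> nibble 1 (here 2) is the
     *                RSS hash type; the next 4 bytes are the hash
     *   nibble 2 == 2 (NFP_NET_META_MARK) -> next 4 bytes are skb->mark
     *   remaining nibbles zero -> loop ends, and the returned pointer
     *   must land exactly on skb->data or the caller drops the frame
     */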
+
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1438,14 +1473,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_reserve(skb, nn->rx_offset);
skb_put(skb, data_len - meta_len);
- nfp_net_set_hash(nn->netdev, skb, rxd);
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
r_vec->rx_bytes += skb->len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (nn->fw_ver.major <= 3) {
+ nfp_net_set_hash_desc(nn->netdev, skb, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
+ if (unlikely(end != skb->data)) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ dev_kfree_skb_any(skb);
+ nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
+ continue;
+ }
+ }
+
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, nn->netdev);
@@ -2386,6 +2436,31 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
return stats;
}
+static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+{
+ if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
+ return true;
+ return false;
+}
+
+static int
+nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -ENOTSUPP;
+ if (proto != htons(ETH_P_ALL))
+ return -ENOTSUPP;
+
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
+ return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
+
+ return -EINVAL;
+}
+
static int nfp_net_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2440,6 +2515,11 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
+ if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ nn_err(nn, "Cannot disable HW TC offload while in use\n");
+ return -EBUSY;
+ }
+
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
@@ -2589,6 +2669,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
+ .ndo_setup_tc = nfp_net_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
@@ -2614,7 +2695,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2631,7 +2712,8 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
- nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "");
+ nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
+ nfp_net_ebpf_capable(nn) ? "BPF " : "");
}
/**
@@ -2674,10 +2756,13 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
+ spin_lock_init(&nn->rx_filter_lock);
spin_lock_init(&nn->link_status_lock);
setup_timer(&nn->reconfig_timer,
nfp_net_reconfig_timer, (unsigned long)nn);
+ setup_timer(&nn->rx_filter_stats_timer,
+ nfp_net_filter_stats_timer, (unsigned long)nn);
return nn;
}
@@ -2799,6 +2884,9 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->features = netdev->hw_features;
+ if (nfp_net_ebpf_capable(nn))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ad6c4e31cedd..93b10b441acb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -66,6 +66,13 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
/**
+ * Prepend field types
+ */
+#define NFP_NET_META_FIELD_SIZE 4
+#define NFP_NET_META_HASH 1 /* next field carries hash type */
+#define NFP_NET_META_MARK 2
+
+/**
* Hash type pre-pended when a RSS hash was computed
*/
#define NFP_NET_RSS_NONE 0
@@ -123,6 +130,7 @@
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
+#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -134,6 +142,7 @@
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -196,10 +205,37 @@
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
- * 64B reserved for future use (0x0080 - 0x00c0)
+ * NFP6000 - BPF section
+ * @NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * @NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
-#define NFP_NET_CFG_RESERVED 0x0080
-#define NFP_NET_CFG_RESERVED_SZ 0x0040
+#define NFP_NET_CFG_BPF_ABI 0x0080
+#define NFP_NET_BPF_ABI 1
+#define NFP_NET_CFG_BPF_CAP 0x0081
+#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
+#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
+#define NFP_NET_CFG_BPF_START 0x0084
+#define NFP_NET_CFG_BPF_DONE 0x0086
+#define NFP_NET_CFG_BPF_STACK_SZ 0x0088
+#define NFP_NET_CFG_BPF_INL_MTU 0x0089
+#define NFP_NET_CFG_BPF_SIZE 0x008e
+#define NFP_NET_CFG_BPF_ADDR 0x0090
+#define NFP_NET_CFG_BPF_CFG_8CTX (1 << 0) /* 8ctx mode */
+#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
+#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
+
+/**
+ * 40B reserved for future use (0x0098 - 0x00c0)
+ */
+#define NFP_NET_CFG_RESERVED 0x0098
+#define NFP_NET_CFG_RESERVED_SZ 0x0028
/**
* RSS configuration (0x0100 - 0x01ac):
@@ -303,6 +339,15 @@
#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
+#define NFP_NET_CFG_STATS_APP0_FRAMES (NFP_NET_CFG_STATS_BASE + 0x90)
+#define NFP_NET_CFG_STATS_APP0_BYTES (NFP_NET_CFG_STATS_BASE + 0x98)
+#define NFP_NET_CFG_STATS_APP1_FRAMES (NFP_NET_CFG_STATS_BASE + 0xa0)
+#define NFP_NET_CFG_STATS_APP1_BYTES (NFP_NET_CFG_STATS_BASE + 0xa8)
+#define NFP_NET_CFG_STATS_APP2_FRAMES (NFP_NET_CFG_STATS_BASE + 0xb0)
+#define NFP_NET_CFG_STATS_APP2_BYTES (NFP_NET_CFG_STATS_BASE + 0xb8)
+#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
+#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
+
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 4c9897220969..3418f2277e9d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -106,6 +106,18 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},
+
+ {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
+ {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
+ /* see comments in outro functions in nfp_bpf_jit.c to find out
+ * how different BPF modes use app-specific counters
+ */
+ {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
+ {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
+ {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
+ {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
+ {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
+ {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
new file mode 100644
index 000000000000..8acfb631a0ea
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_net_offload.c
+ * Netronome network device driver: TC offload functions for PF and VF
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "nfp_bpf.h"
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+void nfp_net_filter_stats_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+ struct nfp_stat_pair latest;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ mod_timer(&nn->rx_filter_stats_timer,
+ jiffies + NFP_NET_STAT_POLL_IVL);
+
+ spin_unlock_bh(&nn->rx_filter_lock);
+
+ latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+
+ if (latest.pkts != nn->rx_filter.pkts)
+ nn->rx_filter_change = jiffies;
+
+ nn->rx_filter = latest;
+}
+
+static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
+{
+ nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ nn->rx_filter_prev = nn->rx_filter;
+ nn->rx_filter_change = jiffies;
+}
+
+static int
+nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 bytes, pkts;
+
+ pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
+ bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
+ bytes -= pkts * ETH_HLEN;
+
+ nn->rx_filter_prev = nn->rx_filter;
+
+ preempt_disable();
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int
+nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ /* TC direct action */
+ if (cls_bpf->exts_integrated) {
+ if (tc_no_actions(cls_bpf->exts))
+ return NN_ACT_DIRECT;
+
+ return -ENOTSUPP;
+ }
+
+ /* TC legacy mode */
+ if (!tc_single_action(cls_bpf->exts))
+ return -ENOTSUPP;
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a))
+ return NN_ACT_TC_DROP;
+
+ if (is_tcf_mirred_redirect(a) &&
+ tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ return NN_ACT_TC_REDIR;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int
+nfp_net_bpf_offload_prepare(struct nfp_net *nn,
+ struct tc_cls_bpf_offload *cls_bpf,
+ struct nfp_bpf_result *res,
+ void **code, dma_addr_t *dma_addr, u16 max_instr)
+{
+ unsigned int code_sz = max_instr * sizeof(u64);
+ enum nfp_bpf_action_type act;
+ u16 start_off, done_off;
+ unsigned int max_mtu;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return -ENOTSUPP;
+
+ ret = nfp_net_bpf_get_act(nn, cls_bpf);
+ if (ret < 0)
+ return ret;
+ act = ret;
+
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (max_mtu < nn->netdev->mtu) {
+ nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ return -ENOTSUPP;
+ }
+
+ start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
+ *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
+ GFP_KERNEL);
+ if (!*code)
+ return -ENOMEM;
+
+ ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
+ max_instr, res);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ return ret;
+}
+
+static void
+nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
+ void *code, dma_addr_t dma_addr,
+ unsigned int code_sz, unsigned int n_instr,
+ bool dense_mode)
+{
+ u64 bpf_addr = dma_addr;
+ int err;
+
+ nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+
+ if (dense_mode)
+ bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
+
+ nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
+ nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
+
+ /* Load up the JITed code */
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
+ if (err)
+ nn_err(nn, "FW command error while loading BPF: %d\n", err);
+
+ /* Enable passing packets through BPF function */
+ nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+
+ dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+
+ nfp_net_bpf_stats_reset(nn);
+ mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
+}
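
Bringing a program up takes two firmware round-trips, after which the host-side buffer is no longer referenced and can be freed immediately. The ordering, as the code above executes it:

    /* 1. BPF_SIZE + BPF_ADDR, then UPDATE_BPF -> firmware DMAs in the
     *    JITed code (the 8ctx flag rides in the low address bits, per
     *    NFP_NET_CFG_BPF_CFG_MASK)
     * 2. CTRL_BPF + UPDATE_GEN -> the datapath starts running it
     * 3. dma_free_coherent() + stats reset + stats timer kick-off
     */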
+
+static int nfp_net_bpf_stop(struct nfp_net *nn)
+{
+ if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ return 0;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+ nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ spin_unlock_bh(&nn->rx_filter_lock);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+
+ del_timer_sync(&nn->rx_filter_stats_timer);
+ nn->bpf_offload_skip_sw = 0;
+
+ return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+}
+
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct nfp_bpf_result res;
+ dma_addr_t dma_addr;
+ u16 max_instr;
+ void *code;
+ int err;
+
+ max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+
+ switch (cls_bpf->command) {
+ case TC_CLSBPF_REPLACE:
+ /* There is nothing stopping us from implementing seamless
+ * replace but the simple method of loading I adopted in
+ * the firmware does not handle atomic replace (i.e. we have to
+ * stop the BPF offload and re-enable it). Leaking in a few
+ * frames which didn't have BPF applied in the hardware should
+ * be fine if software fallback is available, though.
+ */
+ if (nn->bpf_offload_skip_sw)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_stop(nn);
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_ADD:
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_DESTROY:
+ return nfp_net_bpf_stop(nn);
+
+ case TC_CLSBPF_STATS:
+ return nfp_net_bpf_stats_update(nn, cls_bpf);
+
+ default:
+ return -ENOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index f7062cb648e1..2800bbf65a89 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -148,7 +148,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 3:
+ case 1 ... 4:
if (is_nfp3200) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 87b7b814778b..712d8bcb7d8c 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -751,7 +751,7 @@ static void netdev_rx(struct net_device *dev)
dev_err(&pdev->dev, "rx crc err\n");
ether->stats.rx_crc_errors++;
} else if (status & RXDS_ALIE) {
- dev_err(&pdev->dev, "rx aligment err\n");
+ dev_err(&pdev->dev, "rx alignment err\n");
ether->stats.rx_frame_errors++;
} else if (status & RXDS_PTLE) {
dev_err(&pdev->dev, "rx longer err\n");
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6ba48406899e..0df1391f9663 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -88,6 +88,9 @@ config QED
---help---
This enables the support for ...
+config QED_LL2
+ bool
+
config QED_SRIOV
bool "QLogic QED 25/40/100Gb SR-IOV support"
depends on QED && PCI_IOV
@@ -104,4 +107,15 @@ config QEDE
---help---
This enables the support for ...
+config INFINIBAND_QEDR
+ tristate "QLogic qede RoCE sources [debug]"
+ depends on QEDE && 64BIT
+ select QED_LL2
+ default n
+ ---help---
+ This provides a temporary node that allows the compilation
+ and logical testing of the InfiniBand over Ethernet support
+ for QLogic QED. This would be replaced by the 'real' option
+ once the QEDR driver is added [+relocated].
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index d1f157e439cf..cda0af7fbc20 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -2,5 +2,7 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o
+ qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
+qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 45ab74676573..653bb5735f0c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,10 +23,11 @@
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
+#include "qed_debug.h"
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.1.20"
+#define DRV_MODULE_VERSION "8.10.9.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -34,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
+#define QED_WID_SIZE (1024)
+#define QED_PF_DEMS_SIZE (4)
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -42,11 +46,21 @@ enum qed_coalescing_mode {
struct qed_eth_cb_ops;
struct qed_dev_info;
+union qed_mcp_protocol_stats;
+enum qed_mcp_protocol_type;
/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * QED_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
@@ -69,6 +83,7 @@ struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
+struct qed_ll2_info;
struct qed_mcp_info;
struct qed_rt_data {
@@ -148,13 +163,17 @@ enum QED_RESOURCES {
QED_RL,
QED_MAC,
QED_VLAN,
+ QED_RDMA_CNQ_RAM,
QED_ILT,
+ QED_LL2_QUEUE,
+ QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
+ QED_RDMA_CNQ,
QED_MAX_FEATURES,
};
@@ -357,6 +376,9 @@ struct qed_hwfn {
struct qed_sb_attn_info *p_sb_attn;
/* Protocol related */
+ bool using_ll2;
+ struct qed_ll2_info *p_ll2_info;
+ struct qed_rdma_info *p_rdma_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -393,6 +415,19 @@ struct qed_hwfn {
/* Buffer for unzipping firmware data */
void *unzip_buf;
+ struct dbg_tools_data dbg_info;
+
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+
+ /* This is used to calculate the doorbell address */
+ u32 dpi_start_offset;
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -402,6 +437,7 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
+ struct qed_roce_ll2_info *ll2;
};
struct pci_params {
@@ -426,6 +462,21 @@ struct qed_int_params {
bool fp_initialized;
u8 fp_msix_base;
u8 fp_msix_cnt;
+ u8 rdma_msix_base;
+ u8 rdma_msix_cnt;
+};
+
+struct qed_dbg_feature {
+ struct dentry *dentry;
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+struct qed_dbg_params {
+ struct qed_dbg_feature features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+ bool print_data;
};
struct qed_dev {
@@ -442,6 +493,8 @@ struct qed_dev {
CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev) QED_IS_AH(dev)
#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
@@ -517,7 +570,6 @@ struct qed_dev {
bool b_is_vf;
u32 drv_type;
-
struct qed_eth_stats *reset_stats;
struct qed_fw_data *fw_data;
@@ -542,7 +594,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
+ struct qed_dbg_params dbg_params;
+
+#ifdef CONFIG_QED_LL2
+ struct qed_cb_ll2_info *ll2;
+ u8 ll2_mac_address[ETH_ALEN];
+#endif
+
const struct firmware *firmware;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
};
#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
@@ -606,7 +669,9 @@ void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
-
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1c35f376143e..82370a1a59ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -48,7 +48,13 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+/* For RoCE, use a 64K ILT page size to cover the RoCE maximum of 256K tasks. */
+#define ILT_DEFAULT_HW_P_SIZE 4
+#else
#define ILT_DEFAULT_HW_P_SIZE 3
+#endif
+
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -377,9 +383,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *vf_cid)
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -405,10 +410,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
return cnt;
}
-static void
-qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg,
+ u8 seg_type, u32 count, bool has_fl)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -420,8 +425,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
- u32 start_line, u32 total_size,
- u32 elem_size)
+ u32 start_line, u32 total_size, u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
@@ -448,8 +452,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_cli->first.val = *p_line;
p_cli->active = true;
- *p_line += DIV_ROUND_UP(p_blk->total_size,
- p_blk->real_size_in_page);
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -795,10 +798,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+ p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->t2) {
- DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
@@ -926,12 +928,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
u32 size;
- size = min_t(u32, sz_left,
- p_blk->real_size_in_page);
+ size = min_t(u32, sz_left, p_blk->real_size_in_page);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- &p_phys,
- GFP_KERNEL);
+ size, &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
memset(p_virt, 0, size);
@@ -963,7 +962,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
rc = -ENOMEM;
goto ilt_shadow_fail;
}
@@ -976,7 +974,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
for (k = 0; k < p_mngr->vf_count; k++) {
@@ -985,7 +983,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
}
@@ -1056,10 +1054,8 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
u32 i;
p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
- if (!p_mngr) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ if (!p_mngr)
return -ENOMEM;
- }
/* Initialize ILT client registers */
clients = p_mngr->clients;
@@ -1111,24 +1107,18 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = qed_ilt_shadow_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ if (rc)
goto tables_alloc_fail;
- }
/* Allocate the T2 table */
rc = qed_cxt_src_t2_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+ if (rc)
goto tables_alloc_fail;
- }
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ if (rc)
goto tables_alloc_fail;
- }
return 0;
@@ -1672,7 +1662,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
- active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
tm_offset += tm_iids.pf_tids[i];
}
@@ -1702,8 +1692,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid)
+ enum protocol_type type, u32 *p_cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 rel_cid;
@@ -1717,8 +1706,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
- DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
- type);
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
@@ -1730,8 +1718,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
- u32 cid,
- enum protocol_type *p_type)
+ u32 cid, enum protocol_type *p_type)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
@@ -1763,8 +1750,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
return true;
}
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
- u32 cid)
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
enum protocol_type type;
@@ -1781,8 +1767,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
-int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
- struct qed_cxt_info *p_info)
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
@@ -1860,6 +1845,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
+ if (p_hwfn->using_ll2)
+ core_cids += 4;
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
switch (p_hwfn->hw_info.personality) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c6f6f2e8192d..2b8bdaa77800 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -170,6 +170,13 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid);
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 3656d2fd673d..130da1c0490b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -875,11 +875,8 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
int rc = 0;
p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
- if (!p_hwfn->p_dcbx_info) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate 'struct qed_dcbx_info'\n");
+ if (!p_hwfn->p_dcbx_info)
rc = -ENOMEM;
- }
return rc;
}
@@ -1190,10 +1187,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
}
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
- if (!dcbx_info) {
- DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+ if (!dcbx_info)
return -ENOMEM;
- }
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
@@ -1227,10 +1222,8 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
struct qed_dcbx_get *dcbx_info;
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
- if (!dcbx_info) {
- DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ if (!dcbx_info)
return NULL;
- }
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
@@ -1982,6 +1975,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
if (!dcbx_info->operational.ieee) {
DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
return -EINVAL;
}
@@ -2150,17 +2144,19 @@ static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
return rc;
}
-int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+static int
+qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
{
return qed_dcbnl_get_ieee_ets(cdev, ets, true);
}
-int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+static int
+qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
{
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
-int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
@@ -2204,7 +2200,7 @@ int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return 0;
}
-int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
new file mode 100644
index 000000000000..88e7d5bef909
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -0,0 +1,6898 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+/* Chip IDs enum */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_K2,
+ MAX_CHIP_IDS
+};
+
+/* Memory groups enum */
+enum mem_groups {
+ MEM_GROUP_PXP_MEM,
+ MEM_GROUP_DMAE_MEM,
+ MEM_GROUP_CM_MEM,
+ MEM_GROUP_QM_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_BRB_RAM,
+ MEM_GROUP_BRB_MEM,
+ MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
+ MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
+ MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CONN_CFC_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
+ MEM_GROUP_CAU_PI,
+ MEM_GROUP_CAU_MEM,
+ MEM_GROUP_PXP_ILT,
+ MEM_GROUP_MULD_MEM,
+ MEM_GROUP_BTB_MEM,
+ MEM_GROUP_IGU_MEM,
+ MEM_GROUP_IGU_MSIX,
+ MEM_GROUP_CAU_SB,
+ MEM_GROUP_BMB_RAM,
+ MEM_GROUP_BMB_MEM,
+ MEM_GROUPS_NUM
+};
+
+/* Memory groups names */
+static const char * const s_mem_group_names[] = {
+ "PXP_MEM",
+ "DMAE_MEM",
+ "CM_MEM",
+ "QM_MEM",
+ "TM_MEM",
+ "BRB_RAM",
+ "BRB_MEM",
+ "PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
+ "IOR",
+ "RAM",
+ "BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CONN_CFC_MEM",
+ "TASK_CFC_MEM",
+ "CAU_PI",
+ "CAU_MEM",
+ "PXP_ILT",
+ "MULD_MEM",
+ "BTB_MEM",
+ "IGU_MEM",
+ "IGU_MSIX",
+ "CAU_SB",
+ "BMB_RAM",
+ "BMB_MEM",
+};
+
+/* Idle check conditions */
+static u32 cond4(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
+{
+ return ((r[0] >> imm[0]) & imm[1]) != imm[2];
+}
+
+static u32 cond5(const u32 *r, const u32 *imm)
+{
+ return (r[0] & imm[0]) != imm[1];
+}
+
+static u32 cond8(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) !=
+ (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
+}
+
+static u32 cond9(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
+ return (r[0] & ~imm[0]) != imm[1];
+}
+
+static u32 cond0(const u32 *r, const u32 *imm)
+{
+ return r[0] != imm[0];
+}
+
+static u32 cond10(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] == imm[0];
+}
+
+static u32 cond11(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] > imm[0];
+}
+
+static u32 cond3(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1];
+}
+
+static u32 cond12(const u32 *r, const u32 *imm)
+{
+ return r[0] & imm[0];
+}
+
+static u32 cond7(const u32 *r, const u32 *imm)
+{
+ return r[0] < (r[1] - imm[0]);
+}
+
+static u32 cond2(const u32 *r, const u32 *imm)
+{
+ return r[0] > imm[0];
+}
+
+/* Array of Idle Check conditions */
+static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
+ cond0,
+ cond1,
+ cond2,
+ cond3,
+ cond4,
+ cond5,
+ cond6,
+ cond7,
+ cond8,
+ cond9,
+ cond10,
+ cond11,
+ cond12,
+};
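Each idle-check rule in the binary debug data names a condition by index and supplies register and immediate operands; evaluation is a plain table dispatch through cond_arr. A hedged sketch of that dispatch (eval_idle_chk_cond() is an illustrative name, not an actual driver function):

	/* A non-zero return means the idle-check condition fired, i.e. the
	 * check failed for the supplied register/immediate operands.
	 */
	static u32 eval_idle_chk_cond(u8 cond_id, const u32 *regs,
				      const u32 *imms)
	{
		return cond_arr[cond_id](regs, imms);
	}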
+
+/******************************* Data Types **********************************/
+
+enum platform_ids {
+ PLATFORM_ASIC,
+ PLATFORM_RESERVED,
+ PLATFORM_RESERVED2,
+ PLATFORM_RESERVED3,
+ MAX_PLATFORM_IDS
+};
+
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Chip constant definitions */
+struct chip_defs {
+ const char *name;
+ struct {
+ u8 num_ports;
+ u8 num_pfs;
+ } per_platform[MAX_PLATFORM_IDS];
+};
+
+/* Platform constant definitions */
+struct platform_defs {
+ const char *name;
+ u32 delay_factor;
+};
+
+/* Storm constant definitions */
+struct storm_defs {
+ char letter;
+ enum block_id block_id;
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ bool has_vfc;
+ u32 sem_fast_mem_addr;
+ u32 sem_frame_mode_addr;
+ u32 sem_slow_enable_addr;
+ u32 sem_slow_mode_addr;
+ u32 sem_slow_mode1_conf_addr;
+ u32 sem_sync_dbg_empty_addr;
+ u32 sem_slow_dbg_empty_addr;
+ u32 cm_ctx_wr_addr;
+ u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_rd_addr;
+ u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_rd_addr;
+ u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_rd_addr;
+ u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_rd_addr;
+};
+
+/* Block constant definitions */
+struct block_defs {
+ const char *name;
+ bool has_dbg_bus[MAX_CHIP_IDS];
+ bool associated_to_storm;
+ u32 storm_id; /* Valid only if associated_to_storm is true */
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ u32 dbg_select_addr;
+ u32 dbg_cycle_enable_addr;
+ u32 dbg_shift_addr;
+ u32 dbg_force_valid_addr;
+ u32 dbg_force_frame_addr;
+ bool has_reset_bit;
+ bool unreset; /* If true, the block is taken out of reset before dump */
+ enum dbg_reset_regs reset_reg;
+ u8 reset_bit_offset; /* Bit offset in reset register */
+};
+
+/* Reset register definitions */
+struct reset_reg_defs {
+ u32 addr;
+ u32 unreset_val;
+ bool exists[MAX_CHIP_IDS];
+};
+
+struct grc_param_defs {
+ u32 default_val[MAX_CHIP_IDS];
+ u32 min;
+ u32 max;
+ bool is_preset;
+ u32 exclude_all_preset_val;
+ u32 crash_preset_val;
+};
+
+struct rss_mem_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 addr; /* In 128b units */
+ u32 num_entries[MAX_CHIP_IDS];
+ u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+};
+
+struct vfc_ram_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 base_row;
+ u32 num_rows;
+};
+
+struct big_ram_defs {
+ const char *instance_name;
+ enum mem_groups mem_group_id;
+ enum mem_groups ram_mem_group_id;
+ enum dbg_grc_params grc_param;
+ u32 addr_reg_addr;
+ u32 data_reg_addr;
+ u32 num_of_blocks[MAX_CHIP_IDS];
+};
+
+struct phy_defs {
+ const char *phy_name;
+ u32 base_addr;
+ u32 tbus_addr_lo_addr;
+ u32 tbus_addr_hi_addr;
+ u32 tbus_data_lo_addr;
+ u32 tbus_data_hi_addr;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_LCIDS 320
+#define MAX_LTIDS 320
+#define NUM_IOR_SETS 2
+#define IORS_PER_SET 176
+#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+#define BYTES_IN_DWORD sizeof(u32)
+
+/* In the macros below, size and offset are specified in bits */
+#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
+#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
+#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
+#define FIELD_DWORD_OFFSET(type, field) \
+ (int)(FIELD_BIT_OFFSET(type, field) / 32)
+#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
+#define FIELD_BIT_MASK(type, field) \
+ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
+ FIELD_DWORD_SHIFT(type, field))
+#define SET_VAR_FIELD(var, type, field, val) \
+ do { \
+ var[FIELD_DWORD_OFFSET(type, field)] &= \
+ (~FIELD_BIT_MASK(type, field)); \
+ var[FIELD_DWORD_OFFSET(type, field)] |= \
+ (val) << FIELD_DWORD_SHIFT(type, field); \
+ } while (0)
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, addr, (arr)[i]); \
+ } while (0)
+#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ (arr)[i] = qed_rd(dev, ptt, addr); \
+ } while (0)
+
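SET_VAR_FIELD() locates the dword that owns a field from its bit offset, clears the field's mask, then ORs in the shifted value. A self-contained sketch, assuming hypothetical DEMO_CMD_ROW_* constants in place of the real HSI *_OFFSET/*_SIZE definitions:

	#include <stdio.h>

	typedef unsigned int u32;

	/* Hypothetical field constants; the real ones come from HSI headers */
	#define DEMO_CMD_ROW_OFFSET 48
	#define DEMO_CMD_ROW_SIZE 9

	/* Copies of the field macros above */
	#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
	#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
	#define FIELD_DWORD_OFFSET(type, field) \
		(int)(FIELD_BIT_OFFSET(type, field) / 32)
	#define FIELD_DWORD_SHIFT(type, field) \
		(FIELD_BIT_OFFSET(type, field) % 32)
	#define FIELD_BIT_MASK(type, field) \
		(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
		 FIELD_DWORD_SHIFT(type, field))
	#define SET_VAR_FIELD(var, type, field, val) \
		do { \
			var[FIELD_DWORD_OFFSET(type, field)] &= \
				(~FIELD_BIT_MASK(type, field)); \
			var[FIELD_DWORD_OFFSET(type, field)] |= \
				(val) << FIELD_DWORD_SHIFT(type, field); \
		} while (0)

	int main(void)
	{
		u32 cmd[2] = { 0 };

		/* Bit offset 48 -> dword 1, shift 16: the 9-bit row lands in
		 * bits 16..24 of cmd[1], with those bits cleared first.
		 */
		SET_VAR_FIELD(cmd, DEMO_CMD, ROW, 0x1ABu);
		printf("cmd[1] = 0x%08x\n", cmd[1]); /* prints 0x01ab0000 */
		return 0;
	}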
+#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
+#define RAM_LINES_TO_BYTES(lines) \
+ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+#define REG_DUMP_LEN_SHIFT 24
+#define MEM_DUMP_ENTRY_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+#define IDLE_CHK_RULE_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+#define IDLE_CHK_RESULT_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define IDLE_CHK_MAX_ENTRIES_SIZE 32
+
+/* The sizes and offsets below are specified in bits */
+#define VFC_CAM_CMD_STRUCT_SIZE 64
+#define VFC_CAM_CMD_ROW_OFFSET 48
+#define VFC_CAM_CMD_ROW_SIZE 9
+#define VFC_CAM_ADDR_STRUCT_SIZE 16
+#define VFC_CAM_ADDR_OP_OFFSET 0
+#define VFC_CAM_ADDR_OP_SIZE 4
+#define VFC_CAM_RESP_STRUCT_SIZE 256
+#define VFC_RAM_ADDR_STRUCT_SIZE 16
+#define VFC_RAM_ADDR_OP_OFFSET 0
+#define VFC_RAM_ADDR_OP_SIZE 2
+#define VFC_RAM_ADDR_ROW_OFFSET 2
+#define VFC_RAM_ADDR_ROW_SIZE 10
+#define VFC_RAM_RESP_STRUCT_SIZE 256
+#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
+#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
+#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
+#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
+#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
+#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+#define NUM_VFC_RAM_TYPES 4
+#define VFC_CAM_NUM_ROWS 512
+#define VFC_OPCODE_CAM_RD 14
+#define VFC_OPCODE_RAM_RD 0
+#define NUM_RSS_MEM_TYPES 5
+#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_BLOCK_SIZE_BYTES 128
+#define BIG_RAM_BLOCK_SIZE_DWORDS \
+ BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+#define NUM_PHY_TBUS_ADDRESSES 2048
+#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+#define RESET_REG_UNRESET_OFFSET 4
+#define STALL_DELAY_MS 500
+#define STATIC_DEBUG_LINE_DWORDS 9
+#define NUM_DBG_BUS_LINES 256
+#define NUM_COMMON_GLOBAL_PARAMS 8
+#define FW_IMG_MAIN 1
+#define REG_FIFO_DEPTH_ELEMENTS 32
+#define REG_FIFO_ELEMENT_DWORDS 2
+#define REG_FIFO_DEPTH_DWORDS \
+ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
+#define IGU_FIFO_DEPTH_ELEMENTS 64
+#define IGU_FIFO_ELEMENT_DWORDS 4
+#define IGU_FIFO_DEPTH_DWORDS \
+ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
+ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
+ (MCP_REG_SCRATCH + \
+ offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+#define EMPTY_FW_VERSION_STR "???_???_???_???"
+#define EMPTY_FW_IMAGE_STR "???????????????"
+
+/***************************** Constant Arrays *******************************/
+
+/* Debug arrays */
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+
+/* Chip constant definitions array */
+static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+ { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
+ { "bb_b0",
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
+ { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+};
+
+/* Storm constant definitions array */
+static struct storm_defs s_storm_defs[] = {
+ /* Tstorm */
+ {'T', BLOCK_TSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TCM_REG_CTX_RBC_ACCS,
+ 4, TCM_REG_AGG_CON_CTX,
+ 16, TCM_REG_SM_CON_CTX,
+ 2, TCM_REG_AGG_TASK_CTX,
+ 4, TCM_REG_SM_TASK_CTX},
+ /* Mstorm */
+ {'M', BLOCK_MSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCM}, false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MCM_REG_CTX_RBC_ACCS,
+ 1, MCM_REG_AGG_CON_CTX,
+ 10, MCM_REG_SM_CON_CTX,
+ 2, MCM_REG_AGG_TASK_CTX,
+ 7, MCM_REG_SM_TASK_CTX},
+ /* Ustorm */
+ {'U', BLOCK_USEM,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ UCM_REG_CTX_RBC_ACCS,
+ 2, UCM_REG_AGG_CON_CTX,
+ 13, UCM_REG_SM_CON_CTX,
+ 3, UCM_REG_AGG_TASK_CTX,
+ 3, UCM_REG_SM_TASK_CTX},
+ /* Xstorm */
+ {'X', BLOCK_XSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XCM_REG_CTX_RBC_ACCS,
+ 9, XCM_REG_AGG_CON_CTX,
+ 15, XCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0},
+ /* Ystorm */
+ {'Y', BLOCK_YSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCY}, false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YCM_REG_CTX_RBC_ACCS,
+ 2, YCM_REG_AGG_CON_CTX,
+ 3, YCM_REG_SM_CON_CTX,
+ 2, YCM_REG_AGG_TASK_CTX,
+ 12, YCM_REG_SM_TASK_CTX},
+ /* Pstorm */
+ {'P', BLOCK_PSEM,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PCM_REG_CTX_RBC_ACCS,
+ 0, 0,
+ 10, PCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0}
+};
+
+/* Block definitions array */
+static struct block_defs block_grc_defs = {
+ "grc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
+ GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
+ GRC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_UA, 1
+};
+
+static struct block_defs block_miscs_defs = {
+ "miscs", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_misc_defs = {
+ "misc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbu_defs = {
+ "dbu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pglue_b_defs = {
+ "pglue_b", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
+ PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
+ PGLUE_B_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 1
+};
+
+static struct block_defs block_cnig_defs = {
+ "cnig", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
+ CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
+ CNIG_REG_DBG_FORCE_FRAME_K2,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 0
+};
+
+static struct block_defs block_cpmu_defs = {
+ "cpmu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 8
+};
+
+static struct block_defs block_ncsi_defs = {
+ "ncsi", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
+ NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
+ NCSI_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 5
+};
+
+static struct block_defs block_opte_defs = {
+ "opte", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 4
+};
+
+static struct block_defs block_bmb_defs = {
+ "bmb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
+ BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
+ BMB_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 7
+};
+
+static struct block_defs block_pcie_defs = {
+ "pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp_defs = {
+ "mcp", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp2_defs = {
+ "mcp2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
+ MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
+ MCP2_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pswhst_defs = {
+ "pswhst", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
+ PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
+ PSWHST_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswhst2_defs = {
+ "pswhst2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
+ PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
+ PSWHST2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswrd_defs = {
+ "pswrd", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
+ PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
+ PSWRD_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswrd2_defs = {
+ "pswrd2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
+ PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
+ PSWRD2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswwr_defs = {
+ "pswwr", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
+ PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
+ PSWWR_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswwr2_defs = {
+ "pswwr2", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswrq_defs = {
+ "pswrq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
+ PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
+ PSWRQ_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pswrq2_defs = {
+ "pswrq2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
+ PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
+ PSWRQ2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pglcs_defs = {
+ "pglcs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
+ PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
+ PGLCS_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 2
+};
+
+static struct block_defs block_ptu_defs = {
+ "ptu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
+ PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
+ PTU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
+};
+
+static struct block_defs block_dmae_defs = {
+ "dmae", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
+ DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
+ DMAE_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
+};
+
+static struct block_defs block_tcm_defs = {
+ "tcm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
+ TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
+ TCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
+};
+
+static struct block_defs block_mcm_defs = {
+ "mcm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
+ MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
+ MCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
+};
+
+static struct block_defs block_ucm_defs = {
+ "ucm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
+ UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
+ UCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
+};
+
+static struct block_defs block_xcm_defs = {
+ "xcm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
+ XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
+ XCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
+};
+
+static struct block_defs block_ycm_defs = {
+ "ycm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
+ YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
+ YCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
+};
+
+static struct block_defs block_pcm_defs = {
+ "pcm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
+ PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
+ PCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
+};
+
+static struct block_defs block_qm_defs = {
+ "qm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
+ QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
+ QM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
+};
+
+static struct block_defs block_tm_defs = {
+ "tm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
+ TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
+ TM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
+};
+
+static struct block_defs block_dorq_defs = {
+ "dorq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
+ DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
+ DORQ_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
+};
+
+static struct block_defs block_brb_defs = {
+ "brb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
+ BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
+ BRB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
+};
+
+static struct block_defs block_src_defs = {
+ "src", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
+ SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
+ SRC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
+};
+
+static struct block_defs block_prs_defs = {
+ "prs", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
+ PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
+ PRS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
+};
+
+static struct block_defs block_tsdm_defs = {
+ "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
+ TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
+ TSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
+};
+
+static struct block_defs block_msdm_defs = {
+ "msdm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
+ MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
+ MSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
+};
+
+static struct block_defs block_usdm_defs = {
+ "usdm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
+ USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
+ USDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
+};
+
+static struct block_defs block_xsdm_defs = {
+ "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
+ XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
+ XSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
+};
+
+static struct block_defs block_ysdm_defs = {
+ "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
+ YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
+ YSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
+};
+
+static struct block_defs block_psdm_defs = {
+ "psdm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
+ PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
+ PSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
+};
+
+static struct block_defs block_tsem_defs = {
+ "tsem", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
+ TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
+ TSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
+};
+
+static struct block_defs block_msem_defs = {
+ "msem", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
+ MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
+ MSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
+};
+
+static struct block_defs block_usem_defs = {
+ "usem", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
+ USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
+ USEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
+};
+
+static struct block_defs block_xsem_defs = {
+ "xsem", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
+ XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
+ XSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
+};
+
+static struct block_defs block_ysem_defs = {
+ "ysem", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
+ YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
+ YSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
+};
+
+static struct block_defs block_psem_defs = {
+ "psem", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
+ PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
+ PSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
+};
+
+static struct block_defs block_rss_defs = {
+ "rss", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
+ RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
+ RSS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
+};
+
+static struct block_defs block_tmld_defs = {
+ "tmld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
+ TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
+ TMLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
+};
+
+static struct block_defs block_muld_defs = {
+ "muld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
+ MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
+ MULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
+};
+
+static struct block_defs block_yuld_defs = {
+ "yuld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
+ YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
+ YULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+};
+
+static struct block_defs block_xyld_defs = {
+ "xyld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
+ XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
+ XYLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
+};
+
+static struct block_defs block_prm_defs = {
+ "prm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
+ PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
+ PRM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
+};
+
+static struct block_defs block_pbf_pb1_defs = {
+ "pbf_pb1", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
+ PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
+ PBF_PB1_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 11
+};
+
+static struct block_defs block_pbf_pb2_defs = {
+ "pbf_pb2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
+ PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
+ PBF_PB2_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 12
+};
+
+static struct block_defs block_rpb_defs = {
+ "rpb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
+ RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
+ RPB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
+};
+
+static struct block_defs block_btb_defs = {
+ "btb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
+ BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
+ BTB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
+};
+
+static struct block_defs block_pbf_defs = {
+ "pbf", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
+ PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
+ PBF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
+};
+
+static struct block_defs block_rdif_defs = {
+ "rdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
+ RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
+ RDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
+};
+
+static struct block_defs block_tdif_defs = {
+ "tdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
+ TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
+ TDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
+};
+
+static struct block_defs block_cdu_defs = {
+ "cdu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
+ CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
+ CDU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
+};
+
+static struct block_defs block_ccfc_defs = {
+ "ccfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
+ CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
+ CCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
+};
+
+static struct block_defs block_tcfc_defs = {
+ "tcfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
+ TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
+ TCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
+};
+
+static struct block_defs block_igu_defs = {
+ "igu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
+ IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
+ IGU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
+};
+
+static struct block_defs block_cau_defs = {
+ "cau", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
+ CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
+ CAU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
+};
+
+static struct block_defs block_umac_defs = {
+ "umac", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
+ UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
+ UMAC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 6
+};
+
+static struct block_defs block_xmac_defs = {
+ "xmac", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbg_defs = {
+ "dbg", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
+};
+
+static struct block_defs block_nig_defs = {
+ "nig", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
+ NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
+ NIG_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
+};
+
+static struct block_defs block_wol_defs = {
+ "wol", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
+ WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
+ WOL_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
+};
+
+static struct block_defs block_bmbn_defs = {
+ "bmbn", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
+ BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
+ BMBN_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ipc_defs = {
+ "ipc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 8
+};
+
+static struct block_defs block_nwm_defs = {
+ "nwm", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
+ NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
+ NWM_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
+};
+
+static struct block_defs block_nws_defs = {
+ "nws", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 12
+};
+
+static struct block_defs block_ms_defs = {
+ "ms", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 13
+};
+
+static struct block_defs block_phy_pcie_defs = {
+ "phy_pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_led_defs = {
+ "led", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_misc_aeu_defs = {
+ "misc_aeu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_bar0_map_defs = {
+ "bar0_map", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
+ &block_grc_defs,
+ &block_miscs_defs,
+ &block_misc_defs,
+ &block_dbu_defs,
+ &block_pglue_b_defs,
+ &block_cnig_defs,
+ &block_cpmu_defs,
+ &block_ncsi_defs,
+ &block_opte_defs,
+ &block_bmb_defs,
+ &block_pcie_defs,
+ &block_mcp_defs,
+ &block_mcp2_defs,
+ &block_pswhst_defs,
+ &block_pswhst2_defs,
+ &block_pswrd_defs,
+ &block_pswrd2_defs,
+ &block_pswwr_defs,
+ &block_pswwr2_defs,
+ &block_pswrq_defs,
+ &block_pswrq2_defs,
+ &block_pglcs_defs,
+ &block_dmae_defs,
+ &block_ptu_defs,
+ &block_tcm_defs,
+ &block_mcm_defs,
+ &block_ucm_defs,
+ &block_xcm_defs,
+ &block_ycm_defs,
+ &block_pcm_defs,
+ &block_qm_defs,
+ &block_tm_defs,
+ &block_dorq_defs,
+ &block_brb_defs,
+ &block_src_defs,
+ &block_prs_defs,
+ &block_tsdm_defs,
+ &block_msdm_defs,
+ &block_usdm_defs,
+ &block_xsdm_defs,
+ &block_ysdm_defs,
+ &block_psdm_defs,
+ &block_tsem_defs,
+ &block_msem_defs,
+ &block_usem_defs,
+ &block_xsem_defs,
+ &block_ysem_defs,
+ &block_psem_defs,
+ &block_rss_defs,
+ &block_tmld_defs,
+ &block_muld_defs,
+ &block_yuld_defs,
+ &block_xyld_defs,
+ &block_prm_defs,
+ &block_pbf_pb1_defs,
+ &block_pbf_pb2_defs,
+ &block_rpb_defs,
+ &block_btb_defs,
+ &block_pbf_defs,
+ &block_rdif_defs,
+ &block_tdif_defs,
+ &block_cdu_defs,
+ &block_ccfc_defs,
+ &block_tcfc_defs,
+ &block_igu_defs,
+ &block_cau_defs,
+ &block_umac_defs,
+ &block_xmac_defs,
+ &block_dbg_defs,
+ &block_nig_defs,
+ &block_wol_defs,
+ &block_bmbn_defs,
+ &block_ipc_defs,
+ &block_nwm_defs,
+ &block_nws_defs,
+ &block_ms_defs,
+ &block_phy_pcie_defs,
+ &block_led_defs,
+ &block_misc_aeu_defs,
+ &block_bar0_map_defs,
+};
+
+static struct platform_defs s_platform_defs[] = {
+ {"asic", 1},
+ {"reserved", 0},
+ {"reserved2", 0},
+ {"reserved3", 0}
+};
+
+static struct grc_param_defs s_grc_param_defs[] = {
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+};
+
+static struct rss_mem_defs s_rss_mem_defs[] = {
+ { "rss_mem_cid", "rss_cid", 0,
+ {256, 256, 320},
+ {32, 32, 32} },
+ { "rss_mem_key_msb", "rss_key", 1024,
+ {128, 128, 208},
+ {256, 256, 256} },
+ { "rss_mem_key_lsb", "rss_key", 2048,
+ {128, 128, 208},
+ {64, 64, 64} },
+ { "rss_mem_info", "rss_info", 3072,
+ {128, 128, 208},
+ {16, 16, 16} },
+ { "rss_mem_ind", "rss_ind", 4096,
+ {(128 * 128), (128 * 128), (128 * 208)},
+ {16, 16, 16} }
+};
+
+static struct vfc_ram_defs s_vfc_ram_defs[] = {
+ {"vfc_ram_tt1", "vfc_ram", 0, 512},
+ {"vfc_ram_mtt2", "vfc_ram", 512, 128},
+ {"vfc_ram_stt2", "vfc_ram", 640, 32},
+ {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
+};
+
+static struct big_ram_defs s_big_ram_defs[] = {
+ { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ {4800, 4800, 5632} },
+ { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ {2880, 2880, 3680} },
+ { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ {1152, 1152, 1152} }
+};
+
+static struct reset_reg_defs s_reset_regs_defs[] = {
+ { MISCS_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ { MISCS_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ { MISCS_REG_RESET_PL_HV_2, 0x0,
+ {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISC_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ { MISC_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+};
+
+static struct phy_defs s_phy_defs[] = {
+ {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
+ {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+};
+
+/**************************** Private Functions ******************************/
+
+/* Reads and returns a single dword from the specified unaligned buffer */
+static u32 qed_read_unaligned_dword(u8 *buf)
+{
+ u32 dword;
+
+ memcpy((u8 *)&dword, buf, sizeof(dword));
+ return dword;
+}
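The memcpy() above is deliberate: a direct *(const u32 *)buf load could fault or invoke undefined behavior on strict-alignment architectures, whereas memcpy() is always well-defined and compilers typically lower it to a single load where the target allows it. For contrast (illustrative only):

	u32 bad = *(const u32 *)buf;       /* may trap if buf is unaligned */
	u32 good;
	memcpy(&good, buf, sizeof(good));  /* well-defined on any target */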
+
+/* Initializes debug data for the specified device */
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (dev_data->initialized)
+ return DBG_STATUS_OK;
+
+ if (QED_IS_K2(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_K2;
+ dev_data->mode_enable[MODE_K2] = 1;
+ } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_BB_B0;
+ dev_data->mode_enable[MODE_BB_B0] = 1;
+ } else {
+ return DBG_STATUS_UNKNOWN_CHIP;
+ }
+
+ dev_data->platform_id = PLATFORM_ASIC;
+ dev_data->mode_enable[MODE_ASIC] = 1;
+ dev_data->initialized = true;
+ return DBG_STATUS_OK;
+}
+
+/* Reads the FW info structure for the specified Storm from the chip,
+ * and writes it to the specified fw_info pointer.
+ */
+static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 storm_id, struct fw_info *fw_info)
+{
+	/* First read the address that points to the fw_info location;
+	 * it is stored in the last line of the Storm RAM.
+	 */
+ u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(struct fw_info_location);
+ struct fw_info_location fw_info_location;
+ u32 *dest = (u32 *)&fw_info_location;
+ u32 i;
+
+ memset(&fw_info_location, 0, sizeof(fw_info_location));
+ memset(fw_info, 0, sizeof(*fw_info));
+ for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ if (fw_info_location.size > 0 && fw_info_location.size <=
+ sizeof(*fw_info)) {
+ /* Read FW version info from Storm RAM */
+ addr = fw_info_location.grc_addr;
+ dest = (u32 *)fw_info;
+ for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ }
+}
+
+/* Dumps the specified string to the specified buffer. Returns the dumped size
+ * in bytes (actual length + 1 for the null character termination).
+ */
+static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
+{
+ if (dump)
+ strcpy(dump_buf, str);
+ return (u32)strlen(str) + 1;
+}
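Note the bool dump parameter taken by qed_dump_str() and every other helper in this file: with dump false, nothing is written but the size that would have been produced is still returned, so a caller can run the same path twice, first to size a buffer and then to fill it. A hedged caller-side sketch (the buffer handling is illustrative, not taken from the driver):

	/* Pass 1: no writes, just compute the required size. */
	u32 size = qed_dump_str(NULL, false, "fw-version");
	/* Pass 2: write for real into a buffer of at least that size. */
	qed_dump_str(buf, true, "fw-version");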
+
+/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
+ * in bytes.
+ */
+static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
+{
+ u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+
+ align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
+
+ if (dump && align_size)
+ memset(dump_buf, 0, align_size);
+ return align_size;
+}
+
+/* Writes the specified string param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_str_param(u32 *dump_buf,
+ bool dump,
+ const char *param_name, const char *param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a string param value */
+ if (dump)
+ *(char_buf + offset) = 1;
+ offset++;
+
+ /* Dump param value */
+ offset += qed_dump_str(char_buf + offset, dump, param_val);
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+ return BYTES_TO_DWORDS(offset);
+}
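+
+/* Illustrative layout (not normative): the string param "dump-type" =
+ * "grc-dump" is encoded as "dump-type\0" (10 bytes), a type byte of 1
+ * (string value), "grc-dump\0" (9 bytes) and zero padding up to the next
+ * dword boundary - 20 bytes in total, which is already aligned, so the
+ * function returns 5 (dwords).
+ */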
+
+/* Writes the specified numeric param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_num_param(u32 *dump_buf,
+ bool dump, const char *param_name, u32 param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a numeric param value */
+ if (dump)
+ *(char_buf + offset) = 0;
+ offset++;
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+
+ /* Dump param value (and change offset from bytes to dwords) */
+ offset = BYTES_TO_DWORDS(offset);
+ if (dump)
+ *(dump_buf + offset) = param_val;
+ offset++;
+ return offset;
+}
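+
+/* Illustrative layout (not normative): the numeric param "count" = 3 is
+ * encoded as "count\0" (6 bytes), a type byte of 0 (numeric value) and one
+ * padding byte to reach a dword boundary (8 bytes = 2 dwords), followed by
+ * the value 3 in its own dword - a return value of 3 dwords.
+ */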
+
+/* Reads the FW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+ char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+ struct fw_info fw_info = { {0}, {0} };
+ int printed_chars;
+ u32 offset = 0;
+
+ if (dump) {
+ /* Read FW image/version from PRAM in a non-reset SEMI */
+ bool found = false;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
+ storm_id++) {
+ /* Read FW version/image */
+ if (!dev_data->block_in_reset
+ [s_storm_defs[storm_id].block_id]) {
+ /* read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn,
+ p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ printed_chars =
+ snprintf(fw_ver_str,
+ sizeof(fw_ver_str),
+ "%d_%d_%d_%d",
+ fw_info.ver.num.major,
+ fw_info.ver.num.minor,
+ fw_info.ver.num.rev,
+ fw_info.ver.num.eng);
+ if (printed_chars < 0 || printed_chars >=
+ sizeof(fw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
+ }
+
+ found = true;
+ }
+ }
+ }
+
+ /* Dump FW version, image and timestamp */
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-version", fw_ver_str);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-image", fw_img_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fw-timestamp", fw_info.ver.timestamp);
+ return offset;
+}
+
+/* Reads the MFW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
+
+ if (dump) {
+ u32 global_section_offsize, global_section_addr, mfw_ver;
+ u32 public_data_addr, global_section_offsize_addr;
+ int printed_chars;
+
+ /* Find MCP public data GRC address.
+ * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ */
+ public_data_addr = qed_rd(p_hwfn, p_ptt,
+ MISC_REG_SHARED_MEM_ADDR) |
+ MCP_REG_SCRATCH;
+
+ /* Find MCP public global section offset */
+ global_section_offsize_addr = public_data_addr +
+ offsetof(struct mcp_public_data,
+ sections) +
+ sizeof(offsize_t) * PUBLIC_GLOBAL;
+ global_section_offsize = qed_rd(p_hwfn, p_ptt,
+ global_section_offsize_addr);
+ global_section_addr = MCP_REG_SCRATCH +
+ (global_section_offsize &
+ OFFSIZE_OFFSET_MASK) * 4;
+
+ /* Read MFW version from MCP public global section */
+ mfw_ver = qed_rd(p_hwfn, p_ptt,
+ global_section_addr +
+ offsetof(struct public_global, mfw_ver));
+
+ /* Dump MFW version param */
+ printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
+ "%d_%d_%d_%d",
+ (u8) (mfw_ver >> 24),
+ (u8) (mfw_ver >> 16),
+ (u8) (mfw_ver >> 8),
+ (u8) mfw_ver);
+ if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid MFW version string\n");
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
+}
+
+/* Writes a section header to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_section_hdr(u32 *dump_buf,
+ bool dump, const char *name, u32 num_params)
+{
+ return qed_dump_num_param(dump_buf, dump, name, num_params);
+}
+
+/* Writes the common global params to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 num_specific_global_params)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+
+ /* Find platform string and dump global params section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump,
+ "global_params",
+ NUM_COMMON_GLOBAL_PARAMS +
+ num_specific_global_params);
+
+ /* Store params */
+ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_mfw_ver_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "tools-version", TOOLS_VERSION);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "chip",
+ s_chip_defs[dev_data->chip_id].name);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "platform",
+ s_platform_defs[dev_data->platform_id].
+ name);
+ offset +=
+ qed_dump_num_param(dump_buf + offset, dump, "pci-func",
+ p_hwfn->abs_pf_id);
+ return offset;
+}
+
+/* Writes the last section to the specified buffer at the given offset.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+{
+ u32 start_offset = offset, crc = ~0;
+
+ /* Dump CRC section header */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+
+ /* Calculate CRC32 and add it to the dword following the "last" section.
+ */
+ if (dump)
+ *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ DWORDS_TO_BYTES(offset));
+ offset++;
+ return offset - start_offset;
+}
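+
+/* Consumer-side note (a sketch, not part of this file's API): given a
+ * complete dump of size_dwords dwords, the image can be validated by
+ * recomputing ~crc32(~0, (u8 *)buf, (size_dwords - 1) * 4) over everything
+ * preceding the CRC dword and comparing the result with the last dword,
+ * which holds the CRC written above.
+ */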
+
+/* Update blocks reset state */
+static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Read reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++)
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id])
+ reg_val[i] = qed_rd(p_hwfn,
+ p_ptt, s_reset_regs_defs[i].addr);
+
+ /* Check if blocks are in reset */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ dev_data->block_in_reset[i] =
+ s_block_defs[i]->has_reset_bit &&
+ !(reg_val[s_block_defs[i]->reset_reg] &
+ BIT(s_block_defs[i]->reset_bit_offset));
+}
+
+/* Enable / disable the Debug block */
+static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
+}
+
+/* Resets the Debug block */
+static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+
+ dbg_reset_reg_addr =
+ s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
+ new_reset_reg_val = old_reset_reg_val &
+ ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
+}
+
+static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_bus_frame_modes mode)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+}
+
+/* Enable / disable Debug Bus clients according to the specified mask.
+ * (1 = enable, 0 = disable)
+ */
+static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 client_mask)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
+}
+
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ bool arg1, arg2;
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match(p_hwfn, modes_buf_offset);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ }
+}
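+
+/* The mode tree is a byte array holding a prefix-notation boolean
+ * expression: a byte below MAX_INIT_MODE_OPS is an operator (NOT takes one
+ * sub-expression, OR/AND take two), and any other byte is a leaf whose
+ * mode index is (byte - MAX_INIT_MODE_OPS). For example (illustrative),
+ * the bytes { INIT_MODE_OP_AND, leaf(MODE_K2), leaf(MODE_ASIC) } evaluate
+ * to mode_enable[MODE_K2] && mode_enable[MODE_ASIC].
+ */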
+
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Clear all GRC params */
+static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_set_by_user[i] = 0;
+}
+
+/* Assign default GRC param values */
+static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ if (!dev_data->grc.param_set_by_user[i])
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
+/* Returns true if the specified entity (indicated by GRC param) should be
+ * included in the dump, false otherwise.
+ */
+static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ return qed_grc_get_param(p_hwfn, grc_param) > 0;
+}
+
+/* Returns true if the specified Storm should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
+ enum dbg_storms storm)
+{
+ return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
+}
+
+/* Returns true if the specified memory should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, u8 mem_group_id)
+{
+ u8 i;
+
+ /* Check Storm match */
+ if (s_block_defs[block_id]->associated_to_storm &&
+ !qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ return false;
+
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
+ mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn,
+ s_big_ram_defs[i].grc_param);
+ if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
+ MEM_GROUP_PXP_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+ if (mem_group_id == MEM_GROUP_RAM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+ if (mem_group_id == MEM_GROUP_PBUF)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+ if (mem_group_id == MEM_GROUP_CAU_MEM ||
+ mem_group_id == MEM_GROUP_CAU_SB ||
+ mem_group_id == MEM_GROUP_CAU_PI)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ if (mem_group_id == MEM_GROUP_QM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
+ mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
+ MEM_GROUP_IGU_MSIX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+ if (mem_group_id == MEM_GROUP_MULD_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+ if (mem_group_id == MEM_GROUP_PRS_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+ if (mem_group_id == MEM_GROUP_DMAE_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+ if (mem_group_id == MEM_GROUP_TM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+ if (mem_group_id == MEM_GROUP_SDM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+ if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
+ MEM_GROUP_RDIF_CTX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+ if (mem_group_id == MEM_GROUP_CM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+ if (mem_group_id == MEM_GROUP_IOR)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+
+ return true;
+}
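+
+/* Note: the Big RAM groups are matched first above, so their dedicated GRC
+ * params take precedence, and any memory group without an explicit check
+ * falls through to the final "return true" and is always included.
+ */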
+
+/* Stalls or unstalls all Storms, according to the 'stall' argument */
+static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool stall)
+{
+ u8 reg_val = stall ? 1 : 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ u32 reg_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0;
+
+ qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
+ }
+ }
+
+ msleep(STALL_DELAY_MS);
+}
+
+/* Takes all blocks out of reset */
+static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Fill reset regs values */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
+ reg_val[s_block_defs[i]->reset_reg] |=
+ BIT(s_block_defs[i]->reset_bit_offset);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ }
+ }
+}
+
+/* Returns the attention name offsets of the specified block */
+static const struct dbg_attn_block_type_data *
+qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+{
+ const struct dbg_attn_block *base_attn_block_arr =
+ (const struct dbg_attn_block *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+
+ return &base_attn_block_arr[block_id].per_type_data[attn_type];
+}
+
+/* Returns the attention registers of the specified block */
+static const struct dbg_attn_reg *
+qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+ u8 *num_attn_regs)
+{
+ const struct dbg_attn_block_type_data *block_type_data =
+ qed_get_block_attn_data(block_id, attn_type);
+
+ *num_attn_regs = block_type_data->num_regs;
+ return &((const struct dbg_attn_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
+ regs_offset];
+}
+
+/* For each block, clear the status of all parities */
+static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 reg_idx, num_attn_regs;
+ u32 block_id;
+
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id])
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+
+ /* Check mode */
+ bool eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ /* Mode match - read parity status read-clear
+ * register.
+ */
+ qed_rd(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(reg_data->
+ sts_clr_address));
+ }
+ }
+}
+
+/* Dumps GRC registers section header. Returns the dumped size in dwords.
+ * The following parameters are dumped:
+ * - 'count' = num_dumped_entries
+ * - 'split' = split_type
+ * - 'id' = split_id (dumped only if split_id >= 0)
+ * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
+ * param_val != NULL)
+ */
+static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+ bool dump,
+ u32 num_reg_entries,
+ const char *split_type,
+ int split_id,
+ const char *param_name, const char *param_val)
+{
+ u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
+ u32 offset = 0;
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_regs", num_params);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "count", num_reg_entries);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "split", split_type);
+ if (split_id >= 0)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "id", split_id);
+ if (param_name && param_val)
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, param_name, param_val);
+ return offset;
+}
+
+/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 offset = 0, i;
+
+ if (dump) {
+ *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
+ for (i = 0; i < len; i++, addr++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr));
+ } else {
+ offset += len + 1;
+ }
+
+ return offset;
+}
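+
+/* Dump entry layout produced above: one header dword holding the GRC dword
+ * address in the low bits and the length in the bits starting at
+ * REG_DUMP_LEN_SHIFT, followed by 'len' dwords of raw register values read
+ * via qed_rd(). When dump is false, only the size (len + 1) is accounted.
+ */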
+
+/* Dumps GRC registers entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ u32 *num_dumped_reg_entries)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ *num_dumped_reg_entries = 0;
+ while (input_offset < input_regs_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr =
+ (const struct dbg_dump_cond_hdr *)
+ &input_regs_arr.ptr[input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check mode/block */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match && block_enable[cond_hdr->block_id]) {
+ for (i = 0; i < cond_hdr->data_size;
+ i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset, dump,
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS),
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_LENGTH));
+ (*num_dumped_reg_entries)++;
+ }
+ } else {
+ input_offset += cond_hdr->data_size;
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps the GRC registers of a single split (header followed by entries).
+ * Returns the dumped size in dwords, or 0 if no register entries were dumped.
+ */
+static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *split_type_name,
+ u32 split_id,
+ const char *param_name,
+ const char *param_val)
+{
+ u32 num_dumped_reg_entries, offset;
+
+ /* Calculate register dump header size (and skip it for now) */
+ offset = qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ /* Dump registers */
+ offset += qed_grc_dump_regs_entries(p_hwfn,
+ p_ptt,
+ input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ &num_dumped_reg_entries);
+
+ /* Write register dump header */
+ if (dump && num_dumped_reg_entries > 0)
+ qed_grc_dump_regs_hdr(dump_buf,
+ dump,
+ num_dumped_reg_entries,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ return num_dumped_reg_entries > 0 ? offset : 0;
+}
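+
+/* Note on the two-pass pattern above: the header is first sized with
+ * dump=false (so nothing is written), the entries are dumped after it, and
+ * only then is the header rewritten in place with the real entry count.
+ * If no entries matched, the section is omitted entirely (0 is returned).
+ */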
+
+/* Dumps registers according to the input registers array.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *param_name, const char *param_val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, input_offset = 0;
+ u8 port_id, pf_id;
+
+ if (dump)
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_regs_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ case SPLIT_TYPE_VF:
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ "eng",
+ (u32)(-1),
+ param_name,
+ param_val);
+ break;
+ case SPLIT_TYPE_PORT:
+ for (port_id = 0;
+ port_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_ports;
+ port_id++) {
+ if (dump)
+ qed_port_pretend(p_hwfn, p_ptt,
+ port_id);
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "port", port_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_PF:
+ case SPLIT_TYPE_PORT_PF:
+ for (pf_id = 0;
+ pf_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_pfs;
+ pf_id++) {
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id, param_name,
+ param_val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ /* Pretend to original PF */
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ return offset;
+}
+
+/* Dump reset registers. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i, offset = 0, num_regs = 0;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (s_reset_regs_defs
+ [i].addr), 1);
+ num_regs++;
+ }
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true, num_regs, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dump registers that are modified during GRC Dump and therefore must be dumped
+ * first. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, num_reg_entries = 0, block_id;
+ u8 storm_id, reg_idx, num_attn_regs;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write parity registers */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id] && dump)
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
+ /* Mode match - read and dump registers */
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ reg_data->mask_address,
+ 1);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS),
+ 1);
+ num_reg_entries += 2;
+ }
+ }
+ }
+
+ /* Write storm stall status registers */
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
+ dump)
+ continue;
+
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].
+ sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED),
+ 1);
+ num_reg_entries++;
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dumps a GRC memory header (section and params).
+ * The following parameters are dumped:
+ * name - name is dumped only if it's not NULL.
+ * addr - byte_addr is dumped only if name is NULL.
+ * len - dword_len is always dumped.
+ * width - bit_width is dumped if it's not zero.
+ * packed - packed=1 is dumped if it's not false.
+ * mem_group - mem_group is always dumped.
+ * is_storm - true only if the memory is related to a Storm.
+ * storm_letter - storm letter (valid only if is_storm is true).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u8 num_params = 3;
+ u32 offset = 0;
+ char buf[64];
+
+ if (!dword_len)
+ DP_NOTICE(p_hwfn,
+ "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+ if (bit_width)
+ num_params++;
+ if (packed)
+ num_params++;
+
+ /* Dump section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_mem", num_params);
+ if (name) {
+ /* Dump name */
+ if (is_storm) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), name);
+ } else {
+ strcpy(buf, name);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "name", buf);
+ if (dump)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from %s...\n",
+ dword_len, buf);
+ } else {
+ /* Dump address */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "addr", byte_addr);
+ if (dump && dword_len > 64)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from address 0x%x...\n",
+ dword_len, byte_addr);
+ }
+
+ /* Dump len */
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+
+ /* Dump bit width */
+ if (bit_width)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "width", bit_width);
+
+ /* Dump packed */
+ if (packed)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "packed", 1);
+
+ /* Dump reg type */
+ if (is_storm) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), mem_group);
+ } else {
+ strcpy(buf, mem_group);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+ return offset;
+}
+
+/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u32 offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ byte_addr,
+ dword_len,
+ bit_width,
+ packed,
+ mem_group, is_storm, storm_letter);
+ if (dump) {
+ u32 i;
+
+ for (i = 0; i < dword_len;
+ i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ } else {
+ offset += dword_len;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_mems_arr,
+ u32 *dump_buf, bool dump)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ while (input_offset < input_mems_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr;
+ u32 num_entries;
+ bool eval_mode;
+
+ cond_hdr = (const struct dbg_dump_cond_hdr *)
+ &input_mems_arr.ptr[input_offset++];
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check required mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match) {
+ input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
+ for (i = 0; i < num_entries;
+ i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
+ const struct dbg_dump_mem *mem =
+ (const struct dbg_dump_mem *)
+ &input_mems_arr.ptr[input_offset];
+ u8 mem_group_id;
+
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ if (mem_group_id >= MEM_GROUPS_NUM) {
+ DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
+ return 0;
+ }
+
+ if (qed_grc_is_mem_included(p_hwfn,
+ (enum block_id)cond_hdr->block_id,
+ mem_group_id)) {
+ u32 mem_byte_addr =
+ DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS));
+ u32 mem_len = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_LENGTH);
+ char storm_letter = 'a';
+ bool is_storm = false;
+
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS)
+ * (mem_len / MAX_LCIDS);
+ else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS)
+ * (mem_len / MAX_LTIDS);
+
+ /* If memory is associated with Storm, update
+ * Storm details.
+ */
+ if (s_block_defs[cond_hdr->block_id]->
+ associated_to_storm) {
+ is_storm = true;
+ storm_letter =
+ s_storm_defs[s_block_defs[
+ cond_hdr->block_id]->
+ storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn, p_ptt,
+ dump_buf + offset, dump, NULL,
+ mem_byte_addr, mem_len, 0,
+ false,
+ s_mem_group_names[mem_group_id],
+ is_storm, storm_letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories according to the input array dump_mem.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0;
+
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_mems_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ offset += qed_grc_dump_mem_entries(p_hwfn,
+ p_ptt,
+ curr_input_mems_arr,
+ dump_buf + offset,
+ dump);
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Dumping split memories is currently not supported\n");
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC context data for the specified Storm.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 num_lids,
+ u32 lid_size,
+ u32 rd_reg_addr,
+ u8 storm_id)
+{
+ u32 i, lid, total_size;
+ u32 offset = 0;
+
+ if (!lid_size)
+ return 0;
+ lid_size *= BYTES_IN_DWORD;
+ total_size = num_lids * lid_size;
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ 0,
+ total_size,
+ lid_size * 32,
+ false,
+ name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Dump context data */
+ if (dump) {
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].cm_ctx_wr_addr,
+ BIT(9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ rd_reg_addr);
+ }
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
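+
+/* Read sequence used above: for every dword of every LID, (BIT(9) | lid)
+ * is written to the Storm's CM context write address and one dword is then
+ * read from rd_reg_addr; the hardware is expected to advance through the
+ * lid_size dwords of the selected LID across consecutive reads.
+ */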
+
+/* Dumps GRC contexts. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+		/* Dump Conn AG context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_rd_addr,
+ storm_id);
+
+		/* Dump Conn ST context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_rd_addr,
+ storm_id);
+
+		/* Dump Task AG context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_rd_addr,
+ storm_id);
+
+		/* Dump Task ST context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_rd_addr,
+ storm_id);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC IORs data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ char buf[10] = "IOR_SET_?";
+ u8 storm_id, set_id;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE +
+ DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ s_storm_defs
+ [storm_id].letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dump VFC CAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 storm_id)
+{
+ u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
+ u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+	u32 row;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ "vfc_cam",
+ 0,
+ total_size,
+ 256,
+ false,
+ "vfc_cam",
+ true, s_storm_defs[storm_id].letter);
+ if (dump) {
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dump VFC RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 storm_id, struct vfc_ram_defs *ram_defs)
+{
+ u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
+ u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+	u32 row;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ ram_defs->mem_name,
+ 0,
+ total_size,
+ 256,
+ false,
+ ram_defs->type_name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ if (!dump)
+ return offset + total_size;
+
+ for (row = ram_defs->base_row;
+ row < ram_defs->base_row + ram_defs->num_rows;
+ row++, offset += VFC_RAM_RESP_DWORDS) {
+ /* Write VFC RAM command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ ram_cmd, VFC_RAM_CMD_DWORDS);
+
+ /* Write VFC RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ ram_addr, VFC_RAM_ADDR_DWORDS);
+
+ /* Read VFC RAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ }
+
+ return offset;
+}
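+
+/* VFC access pattern (used for both the CAM and RAM dumps above): a command
+ * is written to SEM_FAST_REG_VFC_DATA_WR, an address to SEM_FAST_REG_VFC_ADDR,
+ * and the response is then read back from SEM_FAST_REG_VFC_DATA_RD, all
+ * relative to the Storm's sem_fast_mem_addr base.
+ */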
+
+/* Dumps GRC VFC data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 storm_id, i;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) &&
+ s_storm_defs[storm_id].has_vfc &&
+ (storm_id != DBG_PSTORM_ID ||
+ dev_data->platform_id == PLATFORM_ASIC)) {
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ storm_id,
+ &s_vfc_ram_defs
+ [i]);
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC RSS data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+ u8 rss_mem_id;
+
+ for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
+ struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
+ u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
+ u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
+ u32 total_size = (num_entries * entry_width) / 32;
+ bool packed = (entry_width == 16);
+ u32 addr = rss_defs->addr;
+ u32 i, j;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ rss_defs->mem_name,
+ addr,
+ total_size,
+ entry_width,
+ packed,
+ rss_defs->type_name, false, 0);
+
+ if (!dump) {
+ offset += total_size;
+ continue;
+ }
+
+ /* Dump RSS data */
+ for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
+ for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
+ *(dump_buf + offset) =
+ qed_rd(p_hwfn, p_ptt,
+ RSS_REG_RSS_RAM_DATA +
+ DWORDS_TO_BYTES(j));
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 big_ram_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char mem_name[12] = "???_BIG_RAM";
+ char type_name[8] = "???_RAM";
+ u32 ram_size, total_blocks;
+ u32 offset = 0, i, j;
+
+ total_blocks =
+ s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+
+ strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
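+	/* The strncpy calls above intentionally omit the terminator: they
+	 * overwrite only the leading "???" placeholder of the pre-initialized
+	 * template strings, keeping the "_RAM" / "_BIG_RAM" suffix and its
+	 * existing null terminator (this assumes the instance names are
+	 * exactly three characters long).
+	 */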
+
+ /* Dump memory header */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ ram_size,
+ BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ false, type_name, false, 0);
+
+ if (!dump)
+ return offset + ram_size;
+
+ /* Read and dump Big RAM data */
+ for (i = 0; i < total_blocks / 2; i++) {
+ qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
+ i);
+ for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
+ s_big_ram_defs[big_ram_id].
+ data_reg_addr +
+ DWORDS_TO_BYTES(j));
+ }
+
+ return offset;
+}
+
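+/* Dumps the MCP scratchpad, cpu_reg_file and MCP registers (halting the MCP
+ * around the read when possible). Returns the dumped size in dwords.
+ */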
+static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ bool block_enable[MAX_BLOCK_ID] = { 0 };
+ bool halted = false;
+ u32 offset = 0;
+
+ /* Halt MCP */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Dump MCP scratchpad */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_SCRATCH,
+ MCP_REG_SCRATCH_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP cpu_reg_file */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_CPU_REG_FILE,
+ MCP_REG_CPU_REG_FILE_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP registers */
+ block_enable[BLOCK_MCP] = true;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, block_enable, "block", "MCP");
+
+ /* Dump required non-MCP registers */
+ offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+ dump, 1, "eng", -1, "block", "MCP");
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (MISC_REG_SHARED_MEM_ADDR), 1);
+
+ /* Release MCP */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+ return offset;
+}
+
+/* Dumps the tbus indirect memory for all PHYs. Returns the dumped size in
+ * dwords.
+ */
+static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
+ char mem_name[32];
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
+ struct phy_defs *phy_defs = &s_phy_defs[phy_id];
+ int printed_chars;
+
+ printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name);
+ if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid PHY memory name\n");
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ PHY_DUMP_SIZE_DWORDS,
+ 16, true, mem_name, false, 0);
+ if (dump) {
+ u32 addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ u32 addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ u32 data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ u32 data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ u8 *bytes_buf = (u8 *)(dump_buf + offset);
+
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_hi_addr);
+ }
+ }
+ }
+
+ offset += PHY_DUMP_SIZE_DWORDS;
+ }
+
+ return offset;
+}
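+
+/* Note on the loop bounds above: the 16-bit tbus address is driven as a
+ * high byte and a low byte, and every address yields two data bytes (low
+ * and high), so NUM_PHY_TBUS_ADDRESSES addresses produce
+ * NUM_PHY_TBUS_ADDRESSES * 2 bytes per PHY - presumably equal to
+ * DWORDS_TO_BYTES(PHY_DUMP_SIZE_DWORDS), by which the offset is advanced.
+ */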
+
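+/* Configures the specified debug line of the specified block: selects the
+ * line and programs its cycle-enable, right-shift, force-valid and
+ * force-frame controls.
+ */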
+static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 cycle_en,
+ u8 right_shift, u8 force_valid, u8 force_frame)
+{
+ struct block_defs *p_block_defs = s_block_defs[block_id];
+
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+}
+
+/* Dumps Static Debug data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, block_id, line_id, addr, i;
+ struct block_defs *p_block_defs;
+
+ if (dump) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG, "Dumping static debug data...\n");
+
+ /* Disable all blocks debug output */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (p_block_defs->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ }
+
+ qed_bus_reset_dbg_block(p_hwfn, p_ptt);
+ qed_bus_set_framing_mode(p_hwfn,
+ p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
+ }
+
+ /* Dump all static debug lines for each relevant block */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ continue;
+
+ /* Dump static section params */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ p_block_defs->name, 0,
+ block_dwords, 32, false,
+ "STATIC", false, 0);
+
+ if (dump && !dev_data->block_in_reset[block_id]) {
+ u8 dbg_client_id =
+ p_block_defs->dbg_client_id[dev_data->chip_id];
+
+ /* Enable block's client */
+ qed_bus_enable_clients(p_hwfn, p_ptt,
+ BIT(dbg_client_id));
+
+ for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id,
+ 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
+ i < STATIC_DEBUG_LINE_DWORDS;
+ i++, offset++, addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
+ addr);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ } else {
+ /* All lines are invalid - dump zeros */
+ if (dump)
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
+ offset += block_dwords;
+ }
+ }
+
+ if (dump) {
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ }
+
+ return offset;
+}
+
+/* Performs GRC Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ bool parities_masked = false;
+ u8 i, port_mode = 0;
+ u32 offset = 0;
+
+	*num_dumped_dwords = 0;
+
+ /* Fill GRC parameters that were not set by the user with their default
+ * value.
+ */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ /* Find port mode */
+ if (dump) {
+ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+ case 0:
+ port_mode = 1;
+ break;
+ case 1:
+ port_mode = 2;
+ break;
+ case 2:
+ port_mode = 4;
+ break;
+ }
+ }
+
+ /* Update reset state */
+ if (dump)
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 4);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "grc-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-lcids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-ltids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "num-ports", port_mode);
+
+	/* Dump reset registers (dumped before taking blocks out of reset) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_reset_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Take all blocks out of reset (using reset registers) */
+ if (dump) {
+ qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
+
+ /* Disable all parities using MFW command */
+ if (dump) {
+ parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
+ if (!parities_masked) {
+ if (qed_grc_get_param
+ (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
+ return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
+ }
+ }
+
+ /* Dump modified registers (dumped before modifying them) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_modified_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Stall storms */
+ if (dump &&
+ (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_IOR) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
+ qed_grc_stall_storms(p_hwfn, p_ptt, true);
+
+ /* Dump all regs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
+ /* Dump all blocks except MCP */
+ bool block_enable[MAX_BLOCK_ID];
+
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ block_enable[i] = true;
+ block_enable[BLOCK_MCP] = false;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ block_enable, NULL, NULL);
+ }
+
+ /* Dump memories */
+ offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
+
+ /* Dump MCP */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
+ offset += qed_grc_dump_mcp(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump context */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
+ offset += qed_grc_dump_ctx(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump RSS memories */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
+ offset += qed_grc_dump_rss(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump Big RAM */
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
+ offset += qed_grc_dump_big_ram(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, i);
+
+ /* Dump IORs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
+ offset += qed_grc_dump_iors(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump VFC */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
+ offset += qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump PHY tbus */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
+ CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ offset += qed_grc_dump_phy(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump static debug data */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_STATIC) &&
+ dev_data->bus.state == DBG_BUS_STATE_IDLE)
+ offset += qed_grc_dump_static_debug(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+ if (dump) {
+ /* Unstall storms */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
+ qed_grc_stall_storms(p_hwfn, p_ptt, false);
+
+ /* Clear parity status */
+ qed_grc_clear_all_prty(p_hwfn, p_ptt);
+
+ /* Enable all parities using MFW command */
+ if (parities_masked)
+ qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
+ }
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
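+
+/* Usage note: like the other dump routines in this file, qed_grc_dump() is
+ * presumably called twice - once with dump=false, where it only accumulates
+ * offsets to report the required buffer size, and once with dump=true to
+ * actually read the chip and fill the buffer.
+ */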
+
+/* Writes the specified failing Idle Check rule to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+				     u32 *dump_buf,
+ bool dump,
+ u16 rule_id,
+ const struct dbg_idle_chk_rule *rule,
+ u16 fail_entry_id, u32 *cond_reg_values)
+{
+ const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays
+ [BIN_BUF_DBG_IDLE_CHK_REGS].
+ ptr)[rule->reg_offset];
+ const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct dbg_idle_chk_result_hdr *hdr =
+ (struct dbg_idle_chk_result_hdr *)dump_buf;
+ const struct dbg_idle_chk_info_reg *info_regs =
+ &regs[rule->num_cond_regs].info_reg;
+ u32 next_reg_offset = 0, i, offset = 0;
+ u8 reg_id;
+
+ /* Dump rule data */
+ if (dump) {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->rule_id = rule_id;
+ hdr->mem_entry_id = fail_entry_id;
+ hdr->severity = rule->severity;
+ hdr->num_dumped_cond_regs = rule->num_cond_regs;
+ }
+
+ offset += IDLE_CHK_RESULT_HDR_DWORDS;
+
+ /* Dump condition register values */
+ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+
+ /* Write register header */
+ if (dump) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
+ + offset);
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0,
+ sizeof(struct dbg_idle_chk_result_reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0
+ ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size;
+ i++, next_reg_offset++, offset++)
+ dump_buf[offset] =
+ cond_reg_values[next_reg_offset];
+ } else {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
+ reg->entry_size;
+ }
+ }
+
+ /* Dump info register values */
+ for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
+ const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
+ u32 block_id;
+
+ if (!dump) {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
+ continue;
+ }
+
+ /* Check if register's block is in reset */
+ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ if (!dev_data->block_in_reset[block_id]) {
+ bool eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match =
+ qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 grc_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS));
+
+ /* Write register header */
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg->size;
+ i++, offset++, grc_addr += 4)
+ dump_buf[offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+ }
+
+ return offset;
+}
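+
+/* Failure record layout produced above: a dbg_idle_chk_result_hdr, then one
+ * dbg_idle_chk_result_reg_hdr plus its values for every condition register,
+ * then the same pair for every info register whose block is out of reset
+ * and whose mode matches.
+ */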
+
+/* Dumps idle check rule entries. Returns the dumped size in dwords. */
+static u32
+qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump,
+ const struct dbg_idle_chk_rule *input_rules,
+ u32 num_input_rules, u32 *num_failing_rules)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
+ u32 i, j, offset = 0;
+ u16 entry_id;
+ u8 reg_id;
+
+ *num_failing_rules = 0;
+ for (i = 0; i < num_input_rules; i++) {
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_rule *rule;
+ const union dbg_idle_chk_reg *regs;
+ u16 num_reg_entries = 1;
+ bool check_rule = true;
+ const u32 *imm_values;
+
+ rule = &input_rules[i];
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
+ [rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
+ [rule->imm_offset];
+
+ /* Check if all condition register blocks are out of reset, and
+ * find maximal number of entries (all condition registers that
+ * are memories must have the same size, which is > 1).
+ */
+ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
+ reg_id++) {
+ u32 block_id = GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ check_rule = !dev_data->block_in_reset[block_id];
+ if (cond_regs[reg_id].num_entries > num_reg_entries)
+ num_reg_entries = cond_regs[reg_id].num_entries;
+ }
+
+ if (!check_rule && dump)
+ continue;
+
+ /* Go over all register entries (number of entries is the same
+ * for all condition registers).
+ */
+ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
+ /* Read current entry of all condition registers */
+ if (dump) {
+ u32 next_reg_offset = 0;
+
+ for (reg_id = 0;
+ reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg
+ *reg = &cond_regs[reg_id];
+
+ /* Find GRC address (if it's a memory,
+ * the address of the specific entry is
+ * calculated).
+ */
+ u32 grc_addr =
+ DWORDS_TO_BYTES(
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS));
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two
+ (reg->entry_size) : 1;
+
+ grc_addr +=
+ DWORDS_TO_BYTES(
+ (reg->start_entry +
+ entry_id)
+ * padded_entry_size);
+ }
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ for (j = 0; j < reg->entry_size;
+ j++, next_reg_offset++,
+ grc_addr += 4)
+ cond_reg_values[next_reg_offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+
+ /* Call rule's condition function - a return value of
+ * true indicates failure.
+ */
+ if ((*cond_arr[rule->cond_id])(cond_reg_values,
+ imm_values) || !dump) {
+ offset +=
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
+ (*num_failing_rules)++;
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Performs Idle Check Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0, num_failing_rules = 0;
+ u32 num_failing_rules_offset;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "idle-chk");
+
+ /* Dump idle check section header with a single parameter */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
+ num_failing_rules_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ const struct dbg_idle_chk_cond_hdr *cond_hdr =
+ (const struct dbg_idle_chk_cond_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
+ [input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 curr_failing_rules;
+
+ offset +=
+ qed_idle_chk_dump_rule_entries(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ (const struct dbg_idle_chk_rule *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
+ ptr[input_offset],
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
+ &curr_failing_rules);
+ num_failing_rules += curr_failing_rules;
+ }
+
+ input_offset += cond_hdr->data_size;
+ }
+
+ /* Overwrite num_rules parameter */
+ if (dump)
+ qed_dump_num_param(dump_buf + num_failing_rules_offset,
+ dump, "num_rules", num_failing_rules);
+
+ return offset;
+}
+
+/* Finds the image of the specified type in NVRAM. */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+
+ /* Call NVRAM get file command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type, &ret_mcp_resp, &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att) != 0)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
+ u32 bytes_to_copy, read_offset = 0;
+ s32 bytes_left = nvram_size_bytes;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ (nvram_offset_bytes +
+ read_offset) |
+ (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT),
+ &ret_mcp_resp, &ret_mcp_param,
+ &ret_read_size,
+ (u32 *)((u8 *)ret_buf +
+ read_offset)) != 0)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
+
+/* Get info on the MCP Trace data in the scratchpad:
+ * - trace_data_grc_addr - the GRC address of the trace data
+ * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
+ * the header)
+ */
+static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *trace_data_grc_addr,
+ u32 *trace_data_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ u32 signature;
+
+ /* Extract MCP trace section GRC address from offsize structure (within
+ * scratchpad).
+ */
+ *trace_data_grc_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
+
+ /* Read signature from MCP trace section */
+ signature = qed_rd(p_hwfn, p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, signature));
+ if (signature != MFW_TRACE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read trace size from MCP trace section */
+ *trace_data_size_bytes = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+ return DBG_STATUS_OK;
+}
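+
+/* Illustrative decode sketch (not part of the driver): the scratchpad
+ * "offsize" dword packs the trace section's offset and size, which the
+ * SECTION_OFFSET()/QED_SECTION_SIZE() macros extract. The helper below only
+ * demonstrates the decode pattern; the 16/16 bit split and the dword units
+ * are assumptions made for illustration, not the authoritative layout.
+ */
+#ifdef QED_DBG_EXAMPLES /* hypothetical guard - example is never built */
+static void qed_example_decode_offsize(u32 offsize)
+{
+	u32 offset_dwords = offsize & 0xffff;	/* assumed: low 16 bits */
+	u32 size_dwords = offsize >> 16;	/* assumed: high 16 bits */
+
+	pr_info("trace section: offset %u dwords, size %u dwords\n",
+		offset_dwords, size_dwords);
+}
+#endif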
+
+/* Reads MCP trace meta data image from NVRAM.
+ * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
+ * file)
+ * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
+ * Trace meta data starts (invalid when loaded from file)
+ * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+ */
+static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 trace_data_size_bytes,
+ u32 *running_bundle_id,
+ u32 *trace_meta_offset_bytes,
+ u32 *trace_meta_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Find running bundle ID */
+ u32 running_mfw_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
+ QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
+ enum dbg_status status;
+ u32 nvram_image_type;
+
+ *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
+ if (*running_bundle_id > 1)
+ return DBG_STATUS_INVALID_NVRAM_BUNDLE;
+
+ /* Find image in NVRAM */
+ nvram_image_type =
+ (*running_bundle_id ==
+ DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
+
+ return status;
+}
+
+/* Reads the MCP Trace data from the specified GRC address into the specified
+ * buffer.
+ */
+static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr, u32 size_in_dwords, u32 *buf)
+{
+ u32 i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
+ size_in_dwords, grc_addr);
+ for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
+ buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+}
+
+/* Reads the MCP Trace meta data image from NVRAM into the specified buffer
+ * and verifies its signatures.
+ */
+static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_in_bytes,
+ u32 size_in_bytes, u32 *buf)
+{
+ u8 *byte_buf = (u8 *)buf;
+ u8 modules_num, i;
+ u32 signature;
+
+ /* Read meta data from NVRAM */
+ enum dbg_status status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes,
+ size_in_bytes,
+ buf);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Extract and check first signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Extract number of modules */
+ modules_num = *(byte_buf++);
+
+ /* Skip all modules */
+ for (i = 0; i < modules_num; i++) {
+ u8 module_len = *(byte_buf++);
+
+ byte_buf += module_len;
+ }
+
+ /* Extract and check second signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+ return DBG_STATUS_OK;
+}
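+
+/* Meta data image layout, as validated above (sketch; sizes in bytes):
+ *
+ *   u32  signature - MCP_TRACE_META_IMAGE_SIGNATURE
+ *   u8   modules_num
+ *   repeated modules_num times:
+ *     u8   module_len
+ *     char module_name[module_len]
+ *   u32  signature - MCP_TRACE_META_IMAGE_SIGNATURE (again)
+ *   format entries follow (parsed later by qed_mcp_trace_alloc_meta).
+ */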
+
+/* Dump MCP Trace */
+enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
+ u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ enum dbg_status status;
+ int halted = 0;
+
+ *num_dumped_dwords = 0;
+
+ /* Get trace data info */
+ status = qed_mcp_trace_get_data_info(p_hwfn,
+ p_ptt,
+ &trace_data_grc_addr,
+ &trace_data_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "mcp-trace");
+
+	/* Halt MCP while reading from scratchpad, so the read data will be
+	 * consistent. If the halt fails, the MCP trace is taken anyway, with
+	 * a small risk that it may be corrupt.
+	 */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Find trace data size */
+ trace_data_size_dwords =
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
+
+ /* Dump trace data section header and param */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_data", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_data_size_dwords);
+
+ /* Read trace data from scratchpad into dump buffer */
+ if (dump)
+ qed_mcp_trace_read_data(p_hwfn,
+ p_ptt,
+ trace_data_grc_addr,
+ trace_data_size_dwords,
+ dump_buf + offset);
+ offset += trace_data_size_dwords;
+
+ /* Resume MCP (only if halt succeeded) */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+ /* Dump trace meta section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_meta", 1);
+
+ /* Read trace meta info */
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump trace meta size param (trace_meta_size_bytes is always
+ * dword-aligned).
+ */
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ trace_meta_size_dwords);
+
+ /* Read trace meta image into dump buffer */
+ if (dump) {
+ status = qed_mcp_trace_read_meta(p_hwfn,
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status != DBG_STATUS_OK)
+ return status;
+ }
+
+ offset += trace_meta_size_dwords;
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Dump GRC FIFO */
+enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "reg-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "reg_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
+		 * tell how much data is actually available without reading it.
+		 */
+ offset += REG_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+
+	/* Pull available data from the FIFO. Use DMAE since this is widebus
+	 * memory and must be accessed atomically. Bound dwords_read by the
+	 * FIFO depth, since more entries could be added to the FIFO while we
+	 * are emptying it.
+	 */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
+ REG_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ REG_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Dump IGU FIFO */
+enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "igu-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "igu_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
+		 * tell how much data is actually available without reading it.
+		 */
+ offset += IGU_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+
+	/* Pull available data from the FIFO. Use DMAE since this is widebus
+	 * memory and must be accessed atomically. Bound dwords_read by the
+	 * FIFO depth, since more entries could be added to the FIFO while we
+	 * are emptying it.
+	 */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
+ IGU_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_MEMORY,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ IGU_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Protection Override dump */
+enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, size_param_offset, override_window_dwords;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "protection-override");
+
+ /* Dump data section header and param. The size param is 0 for now, and
+ * is overwritten after reading the data.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "protection_override_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ /* Add override window info to buffer */
+ override_window_dwords =
+ qed_rd(p_hwfn, p_ptt,
+ GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ GRC_REG_PROTECTION_OVERRIDE_WINDOW,
+ (u64)(uintptr_t)(dump_buf + offset),
+ override_window_dwords, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ offset += override_window_dwords;
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Performs FW Asserts Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char storm_letter_str[2] = "?";
+ struct fw_info fw_info;
+ u32 offset = 0, i;
+ u8 storm_id;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "fw-asserts");
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
+ last_list_idx, element_addr;
+
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Dump FW Asserts section header and params */
+ storm_letter_str[0] = s_storm_defs[storm_id].letter;
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
+ storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ fw_info.fw_asserts_section.
+ list_element_dword_size);
+
+ if (!dump) {
+ offset += fw_info.fw_asserts_section.
+ list_element_dword_size;
+ continue;
+ }
+
+ /* Read and dump FW Asserts data */
+ fw_asserts_section_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
+ section_ram_line_offset);
+ next_list_idx_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_next_index_dword_offset);
+ next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
+ last_list_idx = (next_list_idx > 0
+ ? next_list_idx
+ : fw_info.fw_asserts_section.list_num_elements)
+ - 1;
+ element_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_dword_offset) +
+ last_list_idx *
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_element_dword_size);
+ for (i = 0;
+ i < fw_info.fw_asserts_section.list_element_dword_size;
+ i++, offset++, element_addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ }
+
+ /* Dump last section */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ return offset;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
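+
+/* Binary debug-data blob layout, as parsed above (sketch):
+ *
+ *   u32                   num_of_buffers
+ *   struct bin_buffer_hdr hdr[num_of_buffers] - offset/length per buffer
+ *   u8                    payload[] - buffer buf_id starts at
+ *                                     bin_ptr + hdr[buf_id].offset
+ *
+ * Each s_dbg_arrays[buf_id] ends up pointing into the payload, with its
+ * size converted from bytes to dwords.
+ */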
+
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* GRC Dump */
+ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Clear all GRC params */
+ qed_dbg_grc_clear_params(p_hwfn);
+ return status;
+}
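+
+/* Illustrative caller sketch (not part of this file): every dump feature
+ * exposes the same two-step pattern - query the required size, allocate,
+ * then dump. The helper below shows it for GRC, assuming the binary debug
+ * data was already registered via qed_dbg_set_bin_ptr() and that
+ * linux/vmalloc.h is available for vzalloc()/vfree().
+ */
+#ifdef QED_DBG_EXAMPLES /* hypothetical guard - example is never built */
+static enum dbg_status qed_example_grc_dump(struct qed_hwfn *p_hwfn,
+					    struct qed_ptt *p_ptt)
+{
+	u32 buf_size_dwords, num_dumped_dwords;
+	enum dbg_status status;
+	u32 *buf;
+
+	status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+					       &buf_size_dwords);
+	if (status != DBG_STATUS_OK)
+		return status;
+
+	buf = vzalloc(buf_size_dwords * sizeof(u32));
+	if (!buf)
+		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+	status = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, buf_size_dwords,
+				  &num_dumped_dwords);
+
+	/* ...hand buf / num_dumped_dwords to a parser or to user space... */
+	vfree(buf);
+	return status;
+}
+#endif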
+
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ if (!dev_data->idle_chk.buf_size_set) {
+ dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt,
+ NULL, false);
+ dev_data->idle_chk.buf_size_set = true;
+ }
+
+ *buf_size = dev_data->idle_chk.buf_size;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Idle Check Dump */
+ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Perform dump */
+ return qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+ char *format_str;
+};
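+
+/* Example field extraction (sketch): the mask/shift pairs above decode the
+ * packed 'data' dword, e.g. the trace level and total entry length:
+ *
+ *	level = (data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ *		MCP_TRACE_FORMAT_LEVEL_SHIFT;
+ *	len = (data & MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ */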
+
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+};
+
+/* Reg fifo element */
+struct reg_fifo_element {
+ u64 data;
+#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
+#define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
+#define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
+#define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
+#define REG_FIFO_ELEMENT_PF_SHIFT 24
+#define REG_FIFO_ELEMENT_PF_MASK 0xf
+#define REG_FIFO_ELEMENT_VF_SHIFT 28
+#define REG_FIFO_ELEMENT_VF_MASK 0xff
+#define REG_FIFO_ELEMENT_PORT_SHIFT 36
+#define REG_FIFO_ELEMENT_PORT_MASK 0x3
+#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
+#define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
+#define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
+#define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
+#define REG_FIFO_ELEMENT_MASTER_SHIFT 43
+#define REG_FIFO_ELEMENT_MASTER_MASK 0xf
+#define REG_FIFO_ELEMENT_ERROR_SHIFT 47
+#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
+};
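+
+/* Example element decode (sketch): fields are extracted from the 64-bit
+ * 'data' with the shift/mask pairs above, e.g.:
+ *
+ *	addr = (data >> REG_FIFO_ELEMENT_ADDRESS_SHIFT) &
+ *	       REG_FIFO_ELEMENT_ADDRESS_MASK;
+ *	err = (data >> REG_FIFO_ELEMENT_ERROR_SHIFT) &
+ *	      REG_FIFO_ELEMENT_ERROR_MASK;
+ *
+ * The GRC address in bytes is addr * REG_FIFO_ELEMENT_ADDR_FACTOR (defined
+ * below), and err holds the error bits described by s_reg_fifo_error_strs
+ * (also below).
+ */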
+
+/* IGU fifo element */
+struct igu_fifo_element {
+ u32 dword0;
+#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
+ u32 dword1;
+ u32 dword2;
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
+ u32 reserved;
+};
+
+struct igu_fifo_wr_data {
+ u32 data;
+#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
+#define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
+#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
+#define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
+#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
+#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
+#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+struct igu_fifo_cleanup_wr_data {
+ u32 data;
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+/* Protection override element */
+struct protection_override_element {
+ u64 data;
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
+#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
+#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
+};
+
+enum igu_fifo_sources {
+ IGU_SRC_PXP0,
+ IGU_SRC_PXP1,
+ IGU_SRC_PXP2,
+ IGU_SRC_PXP3,
+ IGU_SRC_PXP4,
+ IGU_SRC_PXP5,
+ IGU_SRC_PXP6,
+ IGU_SRC_PXP7,
+ IGU_SRC_CAU,
+ IGU_SRC_ATTN,
+ IGU_SRC_GRC
+};
+
+enum igu_fifo_addr_types {
+ IGU_ADDR_TYPE_MSIX_MEM,
+ IGU_ADDR_TYPE_WRITE_PBA,
+ IGU_ADDR_TYPE_WRITE_INT_ACK,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS,
+ IGU_ADDR_TYPE_READ_INT,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
+ IGU_ADDR_TYPE_RESERVED
+};
+
+struct igu_fifo_addr_data {
+ u16 start_addr;
+ u16 end_addr;
+ char *desc;
+ char *vf_desc;
+ enum igu_fifo_addr_types type;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_MSG_LEN 1024
+#define MCP_TRACE_MAX_MODULE_LEN 8
+#define MCP_TRACE_FORMAT_MAX_PARAMS 3
+#define MCP_TRACE_FORMAT_PARAM_WIDTH \
+ (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
+#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
+
+/********************************* Macros ************************************/
+
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+
+/***************************** Constant Arrays *******************************/
+
+/* Status string array */
+static const char * const s_status_str[] = {
+ "Operation completed successfully",
+ "Debug application version wasn't set",
+ "Unsupported debug application version",
+ "The debug block wasn't reset since the last recording",
+ "Invalid arguments",
+ "The debug output was already set",
+ "Invalid PCI buffer size",
+ "PCI buffer allocation failed",
+ "A PCI buffer wasn't allocated",
+	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
+ "GRC/Timestamp input overlap in cycle dword 0",
+ "Cannot record Storm data since the entire recording cycle is used by HW",
+ "The Storm was already enabled",
+ "The specified Storm wasn't enabled",
+ "The block was already enabled",
+ "The specified block wasn't enabled",
+ "No input was enabled for recording",
+ "Filters and triggers are not allowed when recording in 64b units",
+ "The filter was already enabled",
+ "The trigger was already enabled",
+ "The trigger wasn't enabled",
+ "A constraint can be added only after a filter was enabled or a trigger state was added",
+ "Cannot add more than 3 trigger states",
+ "Cannot add more than 4 constraints per filter or trigger state",
+ "The recording wasn't started",
+ "A trigger was configured, but it didn't trigger",
+ "No data was recorded",
+ "Dump buffer is too small",
+ "Dumped data is not aligned to chunks",
+ "Unknown chip",
+ "Failed allocating virtual memory",
+ "The input block is in reset",
+ "Invalid MCP trace signature found in NVRAM",
+ "Invalid bundle ID found in NVRAM",
+ "Failed getting NVRAM image",
+ "NVRAM image is not dword-aligned",
+ "Failed reading from NVRAM",
+ "Idle check parsing failed",
+ "MCP Trace data is corrupt",
+ "Dump doesn't contain meta data - it must be provided in an image file",
+ "Failed to halt MCP",
+ "Failed to resume MCP after halt",
+ "DMAE transaction failed",
+ "Failed to empty SEMI sync FIFO",
+ "IGU FIFO data is corrupt",
+ "MCP failed to mask parities",
+ "FW Asserts parsing failed",
+ "GRC FIFO data is corrupt",
+ "Protection Override data is corrupt",
+ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
+ "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+};
+
+/* Idle check severity names array */
+static const char * const s_idle_chk_severity_str[] = {
+ "Error",
+ "Error if no traffic",
+ "Warning"
+};
+
+/* MCP Trace level names array */
+static const char * const s_mcp_trace_level_str[] = {
+ "ERROR",
+ "TRACE",
+ "DEBUG"
+};
+
+/* Parsing strings */
+static const char * const s_access_strs[] = {
+ "read",
+ "write"
+};
+
+static const char * const s_privilege_strs[] = {
+ "VF",
+ "PDA",
+ "HV",
+ "UA"
+};
+
+static const char * const s_protection_strs[] = {
+ "(default)",
+ "(default)",
+ "(default)",
+ "(default)",
+ "override VF",
+ "override PDA",
+ "override HV",
+ "override UA"
+};
+
+static const char * const s_master_strs[] = {
+ "???",
+ "pxp",
+ "mcp",
+ "msdm",
+ "psdm",
+ "ysdm",
+ "usdm",
+ "tsdm",
+ "xsdm",
+ "dbu",
+ "dmae",
+ "???",
+ "???",
+ "???",
+ "???",
+ "???"
+};
+
+static const char * const s_reg_fifo_error_strs[] = {
+ "grc timeout",
+ "address doesn't belong to any block",
+ "reserved address in block or write to read-only address",
+ "privilege/protection mismatch",
+ "path isolation error"
+};
+
+static const char * const s_igu_fifo_source_strs[] = {
+ "TSTORM",
+ "MSTORM",
+ "USTORM",
+ "XSTORM",
+ "YSTORM",
+ "PSTORM",
+ "PCIE",
+ "NIG_QM_PBF",
+ "CAU",
+ "ATTN",
+ "GRC",
+};
+
+static const char * const s_igu_fifo_error_strs[] = {
+ "no error",
+ "length error",
+ "function disabled",
+	"VF sent command to attention address",
+ "host sent prod update command",
+	"read of 'during interrupt' register while in MIMD mode",
+ "access to PXP BAR reserved address",
+ "producer update command to attention index",
+ "unknown error",
+ "SB index not valid",
+ "SB relative index and FID not found",
+	"FID mismatch",
+ "command with error flag asserted (PCI error or CAU discard)",
+ "VF sent cleanup and RF cleanup is disabled",
+ "cleanup command on type bigger than 4"
+};
+
+/* IGU FIFO address data */
+static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
+ {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x201, 0x201, "Write PBA[64:127]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
+ IGU_ADDR_TYPE_WRITE_INT_ACK},
+ {0x5f0, 0x5f0, "Attention bits update", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f1, 0x5f1, "Attention bits set", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f2, 0x5f2, "Attention bits clear", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+};
+
+/******************************** Variables **********************************/
+
+/* MCP Trace meta data - used in case the dump doesn't contain the meta data
+ * (e.g. due to no NVRAM access).
+ */
+static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+
+/* Temporary buffer, used for print size calculations */
+static char s_temp_buf[MAX_MSG_LEN];
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
+{
+ return (a + b) % size;
+}
+
+static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
+{
+ return (size + a - b) % size;
+}
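+
+/* Example (sketch): with a 16-byte cyclic buffer whose producer is at
+ * offset 2 and whose oldest entry is at offset 10, the data to parse is
+ * qed_cyclic_sub(2, 10, 16) = (16 + 2 - 10) % 16 = 8 bytes, correctly
+ * handling the wraparound.
+ */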
+
+/* Reads the specified number of bytes (up to 4) from the specified cyclic
+ * buffer and returns them as a dword value. The specified buffer offset is
+ * updated.
+ */
+static u32 qed_read_from_cyclic_buf(void *buf,
+ u32 *offset,
+ u32 buf_size, u8 num_bytes_to_read)
+{
+ u8 *bytes_buf = (u8 *)buf;
+ u8 *val_ptr;
+ u32 val = 0;
+ u8 i;
+
+ val_ptr = (u8 *)&val;
+
+ for (i = 0; i < num_bytes_to_read; i++) {
+ val_ptr[i] = bytes_buf[*offset];
+ *offset = qed_cyclic_add(*offset, 1, buf_size);
+ }
+
+ return val;
+}
+
+/* Reads and returns the next byte from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
+{
+ return ((u8 *)buf)[(*offset)++];
+}
+
+/* Reads and returns the next dword from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
+{
+ u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
+
+ *offset += 4;
+ return dword_val;
+}
+
+/* Reads the next string from the specified buffer, and copies it to the
+ * specified pointer. The specified buffer offset is updated.
+ */
+static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
+{
+ const char *source_str = &((const char *)buf)[*offset];
+
+ strncpy(dest, source_str, size);
+ dest[size - 1] = '\0';
+ *offset += size;
+}
+
+/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
+ * If the specified buffer is NULL, a temporary buffer pointer is returned.
+ */
+static char *qed_get_buf_ptr(void *buf, u32 offset)
+{
+ return buf ? (char *)buf + offset : s_temp_buf;
+}
+
+/* Reads a param from the specified buffer. Returns the number of dwords read.
+ * If the returned param_str_val is NULL, the param is numeric and its value is
+ * returned in param_num_val.
+ * Otherwise, the param is a string and its pointer is returned in
+ * param_str_val.
+ */
+static u32 qed_read_param(u32 *dump_buf,
+ const char **param_name,
+ const char **param_str_val, u32 *param_num_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0; /* In bytes */
+
+ /* Extract param name */
+ *param_name = char_buf;
+ offset += strlen(*param_name) + 1;
+
+ /* Check param type */
+ if (*(char_buf + offset++)) {
+ /* String param */
+ *param_str_val = char_buf + offset;
+ offset += strlen(*param_str_val) + 1;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ } else {
+ /* Numeric param */
+ *param_str_val = NULL;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ *param_num_val = *(u32 *)(char_buf + offset);
+ offset += 4;
+ }
+
+ return offset / 4;
+}
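+
+/* Example encoding (sketch): a numeric param "size" = 16 would be laid out
+ * in the dump buffer as:
+ *
+ *	's' 'i' 'z' 'e' '\0' - param name, NULL-terminated (5 bytes)
+ *	0x00                 - type byte: 0 = numeric, non-zero = string
+ *	0x00 0x00            - padding up to the next dword boundary
+ *	16                   - host-endian u32 value
+ *
+ * for which qed_read_param() returns 3 (dwords consumed).
+ */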
+
+/* Reads a section header from the specified buffer.
+ * Returns the number of dwords read.
+ */
+static u32 qed_read_section_hdr(u32 *dump_buf,
+ const char **section_name,
+ u32 *num_section_params)
+{
+ const char *param_str_val;
+
+ return qed_read_param(dump_buf,
+ section_name, &param_str_val, num_section_params);
+}
+
+/* Reads section params from the specified buffer and prints them to the results
+ * buffer. Returns the number of dwords read.
+ */
+static u32 qed_print_section_params(u32 *dump_buf,
+ u32 num_section_params,
+ char *results_buf, u32 *num_chars_printed)
+{
+ u32 i, dump_offset = 0, results_offset = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ const char *param_name;
+ const char *param_str_val;
+ u32 param_num_val = 0;
+
+ dump_offset += qed_read_param(dump_buf + dump_offset,
+ &param_name,
+ &param_str_val, &param_num_val);
+ if (param_str_val)
+ /* String param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %s\n", param_name, param_str_val);
+ else if (strcmp(param_name, "fw-timestamp"))
+ /* Numeric param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %d\n", param_name, param_num_val);
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ *num_chars_printed = results_offset;
+ return dump_offset;
+}
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
+
+/* Parses the idle check rules and returns the number of characters printed.
+ * In case of parsing error, returns 0.
+ */
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 *dump_buf_end,
+ u32 num_rules,
+ bool print_fw_idle_chk,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ u16 i, j;
+
+ *num_errors = 0;
+ *num_warnings = 0;
+
+ /* Go over dumped results */
+ for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
+ rule_idx++) {
+ const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const char *parsing_str;
+ u32 parsing_str_offset;
+ const char *lsi_msg;
+ u8 curr_reg_id = 0;
+ bool has_fw_msg;
+
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ rule_parsing_data =
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ ptr[hdr->rule_id];
+ parsing_str_offset =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ has_fw_msg =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = &((const char *)
+ s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
+ lsi_msg = parsing_str;
+
+ if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
+ return 0;
+
+ /* Skip rule header */
+ dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+
+ /* Update errors/warnings count */
+ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
+ hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
+ (*num_errors)++;
+ else
+ (*num_warnings)++;
+
+ /* Print rule severity */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s: ",
+ s_idle_chk_severity_str[hdr->severity]);
+
+ /* Print rule message */
+ if (has_fw_msg)
+ parsing_str += strlen(parsing_str) + 1;
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s.",
+ has_fw_msg &&
+ print_fw_idle_chk ? parsing_str : lsi_msg);
+ parsing_str += strlen(parsing_str) + 1;
+
+ /* Print register values */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " Registers:");
+ for (i = 0;
+ i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
+ i++) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr
+ = (struct dbg_idle_chk_result_reg_hdr *)
+ dump_buf;
+ bool is_mem =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ u8 reg_id =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+
+ /* Skip reg header */
+ dump_buf +=
+ (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+
+ /* Skip register names until the required reg_id is
+ * reached.
+ */
+ for (; reg_id > curr_reg_id;
+ curr_reg_id++,
+ parsing_str += strlen(parsing_str) + 1);
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " %s",
+ parsing_str);
+ if (i < hdr->num_dumped_cond_regs && is_mem)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "[%d]", hdr->mem_entry_id +
+ reg_hdr->start_entry);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "=");
+ for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "0x%x", *dump_buf);
+ if (j < reg_hdr->size - 1)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ",");
+ }
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ /* Check if end of dump buffer was exceeded */
+ if (dump_buf > dump_buf_end)
+ return 0;
+ return results_offset;
+}
+
+/* Parses an idle check dump buffer.
+ * If result_buf is not NULL, the idle check results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ u32 *num_errors,
+ u32 *num_warnings)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 *dump_buf_end = dump_buf + num_dumped_dwords;
+ u32 num_section_params = 0, num_rules;
+ u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ *parsed_results_bytes = 0;
+ *num_errors = 0;
+ *num_warnings = 0;
+ if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read idle_chk section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &num_rules);
+ if (strcmp(param_name, "num_rules") != 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ if (num_rules) {
+ u32 rules_print_size;
+
+ /* Print FW output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "FW_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ true,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print LSI output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nLSI_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ false,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ }
+
+ /* Print errors/warnings count */
+ if (*num_errors) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
+ *num_errors, *num_warnings);
+ } else if (*num_warnings) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+			"\nIdle Check completed successfully (with %d warnings)\n",
+ *num_warnings);
+ } else {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+			"\nIdle Check completed successfully\n");
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+/* Frees the specified MCP Trace meta data */
+static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
+ struct mcp_trace_meta *meta)
+{
+ u32 i;
+
+ /* Release modules */
+ if (meta->modules) {
+ for (i = 0; i < meta->modules_num; i++)
+ kfree(meta->modules[i]);
+ kfree(meta->modules);
+ }
+
+ /* Release formats */
+ if (meta->formats) {
+ for (i = 0; i < meta->formats_num; i++)
+ kfree(meta->formats[i].format_str);
+ kfree(meta->formats);
+ }
+}
+
+/* Allocates and fills MCP Trace meta data based on the specified meta data
+ * dump buffer.
+ * Returns debug status code.
+ */
+static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf,
+ struct mcp_trace_meta *meta)
+{
+ u8 *meta_buf_bytes = (u8 *)meta_buf;
+ u32 offset = 0, signature, i;
+
+ memset(meta, 0, sizeof(*meta));
+
+ /* Read first signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of modules and allocate memory for all the modules
+ * pointers.
+ */
+ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+ meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
+ if (!meta->modules)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all module strings */
+ for (i = 0; i < meta->modules_num; i++) {
+ u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+
+ *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
+ if (!(*(meta->modules + i))) {
+			/* Free only the i modules allocated so far */
+			meta->modules_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
+ *(meta->modules + i));
+ if (module_len > MCP_TRACE_MAX_MODULE_LEN)
+ (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
+ }
+
+ /* Read second signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of formats and allocate memory for all formats */
+ meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ meta->formats = kzalloc(meta->formats_num *
+ sizeof(struct mcp_trace_format),
+ GFP_KERNEL);
+ if (!meta->formats)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all strings */
+ for (i = 0; i < meta->formats_num; i++) {
+ struct mcp_trace_format *format_ptr = &meta->formats[i];
+ u8 format_len;
+
+ format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
+ &offset);
+ format_len =
+ (format_ptr->data &
+ MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
+ if (!format_ptr->format_str) {
+			/* Free only the i formats allocated so far */
+			meta->formats_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes,
+ &offset,
+ format_len, format_ptr->format_str);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+/* Parses an MCP Trace dump buffer.
+ * If results_buf is not NULL, the MCP Trace results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
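+/* Expected dump layout, as read below: a "global_params" section, a
+ * "mcp_trace_data" section (a "size" param, a struct mcp_trace header and
+ * the cyclic trace data), and a "mcp_trace_meta" section (a "size" param
+ * and, when the size is non-zero, the meta data image).
+ */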
+static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_mask, param_shift, param_num_val;
+ u32 num_section_params, offset, end_offset, bytes_left;
+ const char *section_name, *param_name, *param_str_val;
+ u32 trace_data_dwords, trace_meta_dwords;
+ struct mcp_trace_meta meta;
+ struct mcp_trace *trace;
+ enum dbg_status status;
+ const u32 *meta_buf;
+ u8 *trace_buf;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read trace_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_data_dwords = param_num_val;
+
+ /* Prepare trace info */
+ trace = (struct mcp_trace *)dump_buf;
+ trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
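+ /* The trace data is a cyclic buffer of trace->size bytes; the valid
+ * window runs from the oldest entry (trace_oldest) to the producer
+ * (trace_prod).
+ */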
+ offset = trace->trace_oldest;
+ end_offset = trace->trace_prod;
+ bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
+ dump_buf += trace_data_dwords;
+
+ /* Read meta_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_meta"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_meta_dwords = param_num_val;
+
+ /* Choose meta data buffer */
+ if (!trace_meta_dwords) {
+ /* Dump doesn't include meta data */
+ if (!s_mcp_trace_meta.ptr)
+ return DBG_STATUS_MCP_TRACE_NO_META;
+ meta_buf = s_mcp_trace_meta.ptr;
+ } else {
+ /* Dump includes meta data */
+ meta_buf = dump_buf;
+ }
+
+ /* Allocate meta data memory */
+ status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
+ if (status != DBG_STATUS_OK)
+ goto free_mem;
+
+ /* Ignore the level and modules masks - just print everything that is
+ * already in the buffer.
+ */
+ while (bytes_left) {
+ struct mcp_trace_format *format_ptr;
+ u8 format_level, format_module;
+ u32 params[3] = { 0, 0, 0 };
+ u32 header, format_idx, i;
+
+ if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ header = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ MFW_TRACE_ENTRY_SIZE);
+ bytes_left -= MFW_TRACE_ENTRY_SIZE;
+ format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+ /* Skip message if its index doesn't exist in the meta data */
+ if (format_idx >= meta.formats_num) {
+ u8 format_size =
+ (u8)((header &
+ MFW_TRACE_PRM_SIZE_MASK) >>
+ MFW_TRACE_PRM_SIZE_SHIFT);
+
+ if (bytes_left < format_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ offset = qed_cyclic_add(offset,
+ format_size, trace->size);
+ bytes_left -= format_size;
+ continue;
+ }
+
+ format_ptr = &meta.formats[format_idx];
+ for (i = 0,
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ i < MCP_TRACE_FORMAT_MAX_PARAMS;
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+ /* Extract param size (0..3) */
+ u8 param_size =
+ (u8)((format_ptr->data &
+ param_mask) >> param_shift);
+
+ /* If the param size is zero, there are no other
+ * parameters.
+ */
+ if (!param_size)
+ break;
+
+ /* Size is encoded using 2 bits, where 3 is used to
+ * encode 4.
+ */
+ if (param_size == 3)
+ param_size = 4;
+ if (bytes_left < param_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ params[i] = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ param_size);
+ bytes_left -= param_size;
+ }
+
+ format_level =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ format_module =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_MODULE_MASK) >>
+ MCP_TRACE_FORMAT_MODULE_SHIFT);
+ if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ /* Print current message to results buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s %-8s: ",
+ s_mcp_trace_level_str[format_level],
+ meta.modules[format_module]);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ format_ptr->format_str, params[0], params[1],
+ params[2]);
+ }
+
+free_mem:
+ *parsed_results_bytes = results_offset + 1;
+ qed_mcp_trace_free_meta(p_hwfn, &meta);
+ return status;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses a Reg FIFO dump buffer.
+ * If results_buf is not NULL, the Reg FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct reg_fifo_element *elements;
+ u8 j, err_val, vf_val;
+ u32 i;
+ char vf_str[4];
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read reg_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "reg_fifo_data"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
+ elements = (struct reg_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ bool err_printed = false;
+
+ /* Discover if element belongs to a VF or a PF */
+ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
+ if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
+ sprintf(vf_str, "%s", "N/A");
+ else
+ sprintf(vf_str, "%d", vf_val);
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ elements[i].data,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ACCESS)],
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PROTECTION)],
+ s_master_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_MASTER)]);
+
+ /* Print errors */
+ for (j = 0,
+ err_val = GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ERROR);
+ j < ARRAY_SIZE(s_reg_fifo_error_strs);
+ j++, err_val >>= 1) {
+ if (!(err_val & 0x1))
+ continue;
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ ", ");
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses an IGU FIFO dump buffer.
+ * If results_buf is not NULL, the IGU FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct igu_fifo_element *elements;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u32 i;
+ u8 j;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read igu_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "igu_fifo_data"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
+ elements = (struct igu_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ /* dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ u64 dword12 =
+ ((u64)elements[i].dword2 << 32) | elements[i].dword1;
+ bool is_wr_cmd = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ bool is_pf = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ u16 cmd_addr = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ u8 source = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ u8 err_type = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+ const struct igu_fifo_addr_data *addr_data = NULL;
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Find address data */
+ for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
+ j++)
+ if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
+ cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
+ addr_data = &s_igu_fifo_addr_data[j];
+ if (!addr_data)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (addr_data->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data,
+ " vector_num=0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB=0x%x", cmd_addr - addr_data->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ /* Prepare parsed write data */
+ if (is_wr_cmd) {
+ u32 wr_data = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ u32 prod_cons = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_PROD_CONS);
+ u8 is_cleanup = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data,
+ "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ u8 cleanup_type = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ u8 en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ u8 segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ u8 timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb ==
+ 1 ? "disable" : "nop") :
+ "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+ } else {
+ parsed_wr_data[0] = '\0';
+ }
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
+ elements[i].dword2, elements[i].dword1,
+ elements[i].dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd", cmd_addr,
+ (!is_pf && addr_data->vf_desc)
+ ? addr_data->vf_desc : addr_data->desc,
+ parsed_addr_data, parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+static enum dbg_status
+qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct protection_override_element *elements;
+ u32 i;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read protection_override_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "protection_override_data"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ elements = (struct protection_override_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ u32 address = GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ i, address,
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE),
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "protection override contained %d elements",
+ num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size);
+}
+
+/* Parses a FW Asserts dump buffer.
+ * If results_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
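+/* Expected dump layout, as read below: a "global_params" section followed by
+ * zero or more "fw_asserts" sections (each with "storm" and "size" params
+ * and size raw dwords), terminated by a "last" section.
+ */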
+static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, num_section_params, param_num_val, i;
+ const char *param_name, *param_str_val, *section_name;
+ bool last_section_found = false;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+ while (!last_section_found) {
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name,
+ &num_section_params);
+ if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ continue;
+ } else if (strcmp(section_name, "fw_asserts")) {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ /* Extract params */
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ if (!storm_letter || !storm_dump_size)
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print data */
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Wrapper for unifying the idle_chk and mcp_trace APIs */
+enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ results_buf, &num_errors,
+ &num_warnings);
+}
+
+/* Feature meta data lookup table */
+static struct {
+ char *name;
+ enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *size);
+ enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ u32 buf_size, u32 *dumped_dwords);
+ enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 num_dumped_dwords,
+ char *results_buf);
+ enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+} qed_features_lookup[] = {
+ {
+ "grc", qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL}, {
+ "idle_chk",
+ qed_dbg_idle_chk_get_dump_buf_size,
+ qed_dbg_idle_chk_dump,
+ qed_print_idle_chk_results_wrapper,
+ qed_get_idle_chk_results_buf_size}, {
+ "mcp_trace",
+ qed_dbg_mcp_trace_get_dump_buf_size,
+ qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
+ qed_get_mcp_trace_results_buf_size}, {
+ "reg_fifo",
+ qed_dbg_reg_fifo_get_dump_buf_size,
+ qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
+ qed_get_reg_fifo_results_buf_size}, {
+ "igu_fifo",
+ qed_dbg_igu_fifo_get_dump_buf_size,
+ qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
+ qed_get_igu_fifo_results_buf_size}, {
+ "protection_override",
+ qed_dbg_protection_override_get_dump_buf_size,
+ qed_dbg_protection_override_dump,
+ qed_print_protection_override_results,
+ qed_get_protection_override_results_buf_size}, {
+ "fw_asserts",
+ qed_dbg_fw_asserts_get_dump_buf_size,
+ qed_dbg_fw_asserts_dump,
+ qed_print_fw_asserts_results,
+ qed_get_fw_asserts_results_buf_size},};
+
+static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
+{
+ u32 i, precision = 80;
+
+ if (!p_text_buf)
+ return;
+
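+ /* Print the buffer in fixed-size chunks; pr_cont() appends to the
+ * previous line, and the '\n' characters embedded in the text provide
+ * the actual line breaks.
+ */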
+ pr_notice("\n%.*s", precision, p_text_buf);
+ for (i = precision; i < text_size; i += precision)
+ pr_cont("%.*s", precision, p_text_buf + i);
+ pr_cont("\n");
+}
+
+#define QED_RESULTS_BUF_MIN_SIZE 16
+/* Generic function for decoding debug feature info */
+enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 text_size_bytes, null_char_pos, i;
+ enum dbg_status rc;
+ char *text_buf;
+
+ /* Check if feature supports formatting capability */
+ if (!qed_features_lookup[feature_idx].results_buf_size)
+ return DBG_STATUS_OK;
+
+ /* Obtain size of formatted output */
+ rc = qed_features_lookup[feature_idx].
+ results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, &text_size_bytes);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Make sure that the allocated size is a multiple of dword (4 bytes) */
+ null_char_pos = text_size_bytes - 1;
+ text_size_bytes = (text_size_bytes + 3) & ~0x3;
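+ /* e.g. a 13-byte formatted result is rounded up to 16 bytes; the pad
+ * bytes are later overwritten with '\n' characters.
+ */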
+
+ if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "formatted size of feature was too small %d. Aborting\n",
+ text_size_bytes);
+ return DBG_STATUS_INVALID_ARGS;
+ }
+
+ /* Allocate temp text buf */
+ text_buf = vzalloc(text_size_bytes);
+ if (!text_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Decode feature opcodes to string on temp buf */
+ rc = qed_features_lookup[feature_idx].
+ print_results(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, text_buf);
+ if (rc != DBG_STATUS_OK) {
+ vfree(text_buf);
+ return rc;
+ }
+
+ /* Replace the original null character with a '\n' character.
+ * The bytes that were added as a result of the dword alignment are also
+ * padded with '\n' characters.
+ */
+ for (i = null_char_pos; i < text_size_bytes; i++)
+ text_buf[i] = '\n';
+
+ /* Dump printable feature to log */
+ if (p_hwfn->cdev->dbg_params.print_data)
+ qed_dbg_print_feature(text_buf, text_size_bytes);
+
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
+ * and formatted text buffer.
+ */
+ vfree(feature->dump_buf);
+ feature->dump_buf = text_buf;
+ feature->buf_size = text_size_bytes;
+ feature->dumped_dwords = text_size_bytes / 4;
+ return rc;
+}
+
+/* Generic function for performing the dump of a debug feature. */
+enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
+ qed_features_lookup[feature_idx].name);
+
+ /* If dump_buf is already allocated, free it (this can happen if a
+ * dump was called but the file was never read).
+ * The buffer can't be reused as-is, since its size may have changed.
+ */
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+
+ /* Get buffer size from hsi, allocate accordingly, and perform the
+ * dump.
+ */
+ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+ feature->buf_size = buf_size_dwords * sizeof(u32);
+ feature->dump_buf = vmalloc(feature->buf_size);
+ if (!feature->dump_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ rc = qed_features_lookup[feature_idx].
+ perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
+ feature->buf_size / sizeof(u32),
+ &feature->dumped_dwords);
+
+ /* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
+ * error. In this case the buffer holds valid binary data, but we
+ * won't be able to parse it (since parsing relies on data in NVRAM
+ * which is only accessible when the MFW is responsive). Skip the
+ * formatting but return success so that the binary data is provided.
+ */
+ if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return DBG_STATUS_OK;
+
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Format output */
+ rc = format_feature(p_hwfn, feature_idx);
+ return rc;
+}
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
+}
+
+int qed_dbg_grc_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+}
+
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
+ num_dumped_bytes);
+}
+
+int qed_dbg_idle_chk_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
+}
+
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
+}
+
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+}
+
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_protection_override_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
+}
+
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
+ num_dumped_bytes);
+}
+
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
+}
+
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
+}
+
+/* Defines the number of bytes allocated for recording the length of a
+ * debugfs feature buffer.
+ */
+#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_FEATURE_SHIFT 24
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+enum debug_print_features {
+ OLD_MODE = 0,
+ IDLE_CHK = 1,
+ GRC_DUMP = 2,
+ MCP_TRACE = 3,
+ REG_FIFO = 4,
+ PROTECTION_OVERRIDE = 5,
+ IGU_FIFO = 6,
+ PHY = 7,
+ FW_ASSERTS = 8,
+};
+
+static u32 qed_calc_regdump_header(enum debug_print_features feature,
+ int engine, u32 feature_size, u8 omit_engine)
+{
+ /* Insert the engine, feature and mode inside the header and combine it
+ * with feature size.
+ */
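+ /* Resulting layout (derived from the shift definitions above):
+ * bits 0-23 - feature size in bytes, bits 24-29 - feature type,
+ * bit 30 - omit_engine flag, bit 31 - engine number.
+ */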
+ return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
+ (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
+ (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+}
+
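+/* Collects all debug features into the supplied buffer. The result is a
+ * sequence of [header dword][feature data] records: per engine - two
+ * idle_chks, reg_fifo, igu_fifo, protection_override, fw_asserts and grc -
+ * followed by the engine-common mcp_trace.
+ */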
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+{
+ u8 cur_engine, omit_engine = 0, org_engine;
+ u32 offset = 0, feature_size;
+ int rc;
+
+ if (cdev->num_hwfns == 1)
+ omit_engine = 1;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Collect idle_chks and grc dumps for each hw function */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "obtaining idle_chk and grcdump for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+
+ /* First idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* Second idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* reg_fifo dump */
+ rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(REG_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
+ }
+
+ /* igu_fifo dump */
+ rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
+ }
+
+ /* protection_override dump */
+ rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE,
+ &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev,
+ "qed_dbg_protection_override failed. rc = %d\n",
+ rc);
+ }
+
+ /* fw_asserts dump */
+ rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(FW_ASSERTS, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
+ rc);
+ }
+
+ /* GRC dump - must be last because when the MCP is stuck it
+ * will clutter the idle_chk, reg_fifo, ... results.
+ */
+ rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
+ }
+ }
+
+ /* mcp_trace */
+ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+ }
+
+ qed_set_debug_engine(cdev, org_engine);
+
+ return 0;
+}
+
+int qed_dbg_all_data_size(struct qed_dev *cdev)
+{
+ u8 cur_engine, org_engine;
+ u32 regs_len = 0;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Engine specific */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "calculating idle_chk and grcdump register length for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+ }
+
+ /* Engine common */
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ qed_set_debug_engine(cdev, org_engine);
+
+ return regs_len;
+}
+
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ enum dbg_status dbg_rc;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ /* Acquire ptt */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EINVAL;
+
+ /* Get dump */
+ dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
+ if (dbg_rc != DBG_STATUS_OK) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
+ qed_dbg_get_status_str(dbg_rc));
+ *num_dumped_bytes = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "copying debugfs feature to external buffer\n");
+ memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
+ *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ 4;
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ if (!p_ptt)
+ return -EINVAL;
+
+ rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ buf_size_dwords = 0;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ qed_feature->buf_size = buf_size_dwords * sizeof(u32);
+ return qed_feature->buf_size;
+}
+
+u8 qed_get_debug_engine(struct qed_dev *cdev)
+{
+ return cdev->dbg_params.engine_for_debug;
+}
+
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
+{
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
+ engine_number);
+ cdev->dbg_params.engine_for_debug = engine_number;
+}
+
+void qed_dbg_pf_init(struct qed_dev *cdev)
+{
+ const u8 *dbg_values;
+
+ /* Debug values follow the init values in the firmware file.
+ * Their offset is stored in the first dword of the file.
+ */
+ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
+ qed_dbg_set_bin_ptr((u8 *)dbg_values);
+ qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+}
+
+void qed_dbg_pf_exit(struct qed_dev *cdev)
+{
+ struct qed_dbg_feature *feature = NULL;
+ enum qed_dbg_features feature_idx;
+
+ /* Debug features' buffers may still be allocated if a debug feature
+ * was used but its buffer was never freed.
+ */
+ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
+ feature = &cdev->dbg_params.features[feature_idx];
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
new file mode 100644
index 000000000000..f872d7324814
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -0,0 +1,54 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEBUGFS_H
+#define _QED_DEBUGFS_H
+
+enum qed_dbg_features {
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_IGU_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_NUM
+};
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_grc_size(struct qed_dev *cdev);
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_idle_chk_size(struct qed_dev *cdev);
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev);
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev);
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_protection_override_size(struct qed_dev *cdev);
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
+int qed_dbg_all_data_size(struct qed_dev *cdev);
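+/* Illustrative usage sketch (hypothetical caller; allocation and error
+ * handling are elided):
+ *
+ *	int size = qed_dbg_all_data_size(cdev);
+ *	void *buf = vzalloc(size);
+ *
+ *	if (buf && !qed_dbg_all_data(cdev, buf))
+ *		...	// buf now holds every feature dump, each prefixed
+ *			// by a header dword (see qed_calc_regdump_header)
+ */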
+u8 qed_get_debug_engine(struct qed_dev *cdev);
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number);
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes);
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature);
+
+void qed_dbg_pf_init(struct qed_dev *cdev);
+void qed_dbg_pf_exit(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0e4f4a9306b5..754f6a908858 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,14 +29,18 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+#include "qed_roce.h"
-static spinlock_t qm_lock;
-static bool qm_lock_init = false;
+static DEFINE_SPINLOCK(qm_lock);
+
+#define QED_MIN_DPIS (4)
+#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
/* API common to all protocols */
enum BAR_ID {
@@ -44,8 +48,7 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -70,8 +73,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
}
}
-void qed_init_dp(struct qed_dev *cdev,
- u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
u32 i;
@@ -150,6 +152,9 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+ qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -343,7 +348,6 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
return 0;
alloc_err:
- DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
@@ -407,6 +411,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
+#ifdef CONFIG_QED_LL2
+ struct qed_ll2_info *p_ll2_info;
+#endif
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
@@ -427,18 +434,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
RESC_NUM(p_hwfn, QED_L2_QUEUE);
p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for Tx Cids\n");
+ if (!p_hwfn->p_tx_cids)
goto alloc_no_mem;
- }
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for Rx Cids\n");
+ if (!p_hwfn->p_rx_cids)
goto alloc_no_mem;
- }
}
for_each_hwfn(cdev, i) {
@@ -523,29 +524,29 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2) {
+ p_ll2_info = qed_ll2_alloc(p_hwfn);
+ if (!p_ll2_info)
+ goto alloc_no_mem;
+ p_hwfn->p_ll2_info = p_ll2_info;
+ }
+#endif
+
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for dmae_info structure\n");
+ if (rc)
goto alloc_err;
- }
/* DCBX initialization */
rc = qed_dcbx_info_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for dcbx structure\n");
+ if (rc)
goto alloc_err;
- }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
- if (!cdev->reset_stats) {
- DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!cdev->reset_stats)
+ goto alloc_no_mem;
return 0;
@@ -580,6 +581,10 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2)
+ qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
}
}
@@ -605,9 +610,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
- DP_NOTICE(
- p_hwfn,
- "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ DP_NOTICE(p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@@ -701,17 +705,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_block->function_id,
- 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
- sb_entry);
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
}
}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+ struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
@@ -759,7 +760,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_port_unpretend(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
- if (rc != 0)
+ if (rc)
return rc;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -780,6 +781,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
@@ -787,38 +791,141 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- int rc = 0;
+ u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+ u32 dpi_bit_shift, dpi_count;
+ u32 min_dpis;
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
- if (rc != 0)
- return rc;
+ /* Calculate DPI size */
+ dpi_page_size_1 = QED_WID_SIZE * n_cpus;
+ dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
+ dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
+ dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ dpi_bit_shift = ilog2(dpi_page_size / 4096);
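+ /* The DPI page size covers one WID per CPU, spans at least one OS
+ * page and is rounded up to a power of two; the DORQ consumes it as
+ * a bit shift relative to a 4KB page.
+ */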
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
+ dpi_count = pwm_region_size / dpi_page_size;
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%08x] is first eth on engine\n", pf_id);
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
- /* We should have configured BIT for ppfid, i.e., the
- * relative function number in the port. But there's a
- * bug in LLH in BB where the ppfid is actually engine
- * based, so we need to take this into account.
- */
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
- }
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return -EINVAL;
+
+ return 0;
+}
+
+enum QED_ROCE_EDPM_MODE {
+ QED_ROCE_EDPM_MODE_ENABLE = 0,
+ QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+ QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
- /* Take the protocol-based hit vector if there is a hit,
- * otherwise take the other vector.
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ int rc = 0;
+ u8 cond;
+
+ db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ if (p_hwfn->cdev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions */
+ non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ NULL) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ NULL);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->cdev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ if (pwm_regsize < QED_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->cdev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize,
+ QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+ ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
*/
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+ n_cpus = num_active_cpus();
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
- return rc;
+
+ cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ if (cond)
+ qed_rdma_dpm_bar(p_hwfn, p_ptt);
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ norm_regsize,
+ pwm_regsize,
+ p_hwfn->dpi_size,
+ p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+ p_hwfn->dpi_count,
+ p_hwfn->pf_params.rdma_pf_params.min_dpis);
+ return -EINVAL;
+ }
+
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return 0;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int hw_mode)
+{
+ return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
+ p_hwfn->port_id, hw_mode);
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -848,7 +955,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_init_rt(p_hwfn);
/* Set VLAN in NIG if needed */
- if (hw_mode & (1 << MODE_MF_SD)) {
+ if (hw_mode & BIT(MODE_MF_SD)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -856,7 +963,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
/* Enable classification by MAC if needed */
- if (hw_mode & (1 << MODE_MF_SI)) {
+ if (hw_mode & BIT(MODE_MF_SI)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Configuring TAGMAC_CLS_TYPE\n");
STORE_RT_REG(p_hwfn,
@@ -871,7 +978,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Cleanup chip from previous driver if such remains exist */
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != 0)
+ if (rc)
return rc;
/* PF Init sequence */
@@ -887,20 +994,9 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
- u32 val = 0;
-
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- if (p_hwfn->rel_pf_id == pf_id) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%d] is first ETH on engine\n",
- pf_id);
- val = 1;
- }
- qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
- }
- }
+ rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
if (b_hw_start) {
/* enable interrupts */
@@ -950,8 +1046,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_main_ptt);
memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
- p_hwfn->mcp_info->mfw_mb_cur,
- p_hwfn->mcp_info->mfw_mb_length);
+ p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
int qed_hw_init(struct qed_dev *cdev,
@@ -971,7 +1066,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
+ if (rc)
return rc;
}
@@ -988,8 +1083,7 @@ int qed_hw_init(struct qed_dev *cdev,
qed_calc_hw_mode(p_hwfn);
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
- &load_code);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
if (rc) {
DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
return rc;
@@ -1004,11 +1098,6 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
- spin_lock_init(&qm_lock);
- qm_lock_init = true;
- }
-
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1071,9 +1160,8 @@ int qed_hw_init(struct qed_dev *cdev,
}
#define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int i;
@@ -1084,8 +1172,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
+ (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
/* Dependent on number of connection/tasks, possibly
@@ -1190,8 +1277,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
}
DP_VERBOSE(p_hwfn,
- NETIF_MSG_IFDOWN,
- "Shutting down the fastpath\n");
+ NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1219,14 +1305,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static int qed_reg_assert(struct qed_hwfn *hwfn,
- struct qed_ptt *ptt, u32 reg,
- bool expected)
+static int qed_reg_assert(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 reg, bool expected)
{
- u32 assert_val = qed_rd(hwfn, ptt, reg);
+ u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
if (assert_val != expected) {
- DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
reg, expected);
return -EINVAL;
}
@@ -1306,8 +1391,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1317,7 +1401,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -1326,6 +1411,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
PXP_CONCRETE_FID_PFID);
p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
@@ -1333,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ /* Each RoCE CNQ requires one status block and one CNQ. We divide the
+ * status blocks equally between L2 / RoCE but with consideration as
+ * to how many l2 queues / cnqs we have.
+ */
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_features++;
+
+ feat_num[QED_RDMA_CNQ] =
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+ }
+#endif
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -1373,6 +1475,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
+ resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
+ num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1396,7 +1502,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
"RL = %d start = %d\n"
"MAC = %d start = %d\n"
"VLAN = %d start = %d\n"
- "ILT = %d start = %d\n",
+ "ILT = %d start = %d\n"
+ "LL2_QUEUE = %d start = %d\n",
p_hwfn->hw_info.resc_num[QED_SB],
p_hwfn->hw_info.resc_start[QED_SB],
p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1412,13 +1519,14 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_num[QED_VLAN],
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
- p_hwfn->hw_info.resc_start[QED_ILT]);
+ p_hwfn->hw_info.resc_start[QED_ILT],
+ RESC_NUM(p_hwfn, QED_LL2_QUEUE),
+ RESC_START(p_hwfn, QED_LL2_QUEUE));
return 0;
}
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
@@ -1472,8 +1580,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
- core_cfg);
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
}
@@ -1484,11 +1591,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
offsetof(struct nvm_cfg1_port, speed_cap_mask));
- link->speed.advertised_speeds =
- link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities =
- link->speed.advertised_speeds;
+ link_temp = link->speed.advertised_speeds;
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
@@ -1517,8 +1624,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link->speed.forced_speed = 100000;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
- link_temp);
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1628,10 +1734,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
p_hwfn->rel_pf_id,
p_hwfn->abs_pf_id,
- p_hwfn->num_funcs_on_engine);
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
static int
@@ -1703,10 +1809,9 @@ static int qed_get_dev_info(struct qed_dev *cdev)
u32 tmp;
/* Read Vendor Id / Device Id */
- pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
- &cdev->vendor_id);
- pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
- &cdev->device_id);
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -1771,10 +1876,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Allocate PTT pool */
rc = qed_ptt_pool_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ if (rc)
goto err0;
- }
/* Allocate the main PTT */
p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
@@ -1782,7 +1885,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = qed_get_dev_info(p_hwfn->cdev);
- if (rc != 0)
+ if (rc)
goto err1;
}
@@ -1804,10 +1907,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ if (rc)
goto err2;
- }
return rc;
err2:
@@ -2015,10 +2116,8 @@ qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
&p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
if (i == 0) {
qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2048,10 +2147,8 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
qed_chain_init_mem(p_chain, p_virt, p_phys);
qed_chain_reset(p_chain);
@@ -2068,13 +2165,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
void *p_virt = NULL;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vmalloc(size);
- if (!pp_virt_addr_tbl) {
- DP_NOTICE(cdev,
- "Failed to allocate memory for the chain virtual addresses table\n");
+ pp_virt_addr_tbl = vzalloc(size);
+ if (!pp_virt_addr_tbl)
return -ENOMEM;
- }
- memset(pp_virt_addr_tbl, 0, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
@@ -2087,19 +2180,15 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
size, &p_pbl_phys, GFP_KERNEL);
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
- if (!p_pbl_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+ if (!p_pbl_virt)
return -ENOMEM;
- }
for (i = 0; i < page_cnt; i++) {
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
&p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
if (i == 0) {
qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2134,7 +2223,8 @@ int qed_chain_alloc(struct qed_dev *cdev,
rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
if (rc) {
DP_NOTICE(cdev,
- "Cannot allocate a chain with the given arguments:\n"
+ "Cannot allocate a chain with the given arguments:\n");
+ DP_NOTICE(cdev,
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
return rc;
@@ -2183,8 +2273,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
return 0;
}
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
u8 min, max;
@@ -2203,8 +2292,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
u8 min, max;
@@ -2223,6 +2311,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+ u8 *p_filter)
+{
+ *p_high = p_filter[1] | (p_filter[0] << 8);
+ *p_low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+}
+
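The packing is easiest to see with a concrete MAC. A minimal standalone sketch of the same byte layout (userspace C, illustrative only; mac_to_filter restates the helper above):

#include <stdint.h>
#include <stdio.h>

/* Same packing as qed_llh_mac_to_filter(): MAC bytes 0-1 form the high
 * word, bytes 2-5 the low word of an LLH filter entry.
 */
static void mac_to_filter(uint32_t *p_high, uint32_t *p_low, const uint8_t *m)
{
	*p_high = m[1] | (m[0] << 8);
	*p_low = m[5] | (m[4] << 8) | (m[3] << 16) | ((uint32_t)m[2] << 24);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t high, low;

	mac_to_filter(&high, &low, mac);
	printf("high=0x%08x low=0x%08x\n", high, low); /* 0x00000011 0x22334455 */
	return 0;
}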
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return 0;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an empty LLH filter to utilize\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is added at %d\n",
+ p_filter, i);
+
+ return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is removed from %d\n",
+ p_filter, i);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
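Taken together, the two routines treat the NIG LLH registers as a small fixed-size filter table keyed by the packed (high, low) value: add scans for the first entry whose EN word reads zero, programs VALUE/MODE/PROTOCOL_TYPE and only then sets EN; remove matches both VALUE words before clearing. A hedged usage sketch — the qed_ptt_acquire/qed_ptt_release pairing mirrors other qed paths, and the caller name is invented:

/* Illustrative caller only -- not part of this patch. */
static int demo_llh_filter(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	int rc;

	if (!p_ptt)
		return -EAGAIN;

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, mac);
	if (!rc)
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, mac);

	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}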
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
@@ -2386,8 +2566,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
* 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
*/
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
- u16 vport_id, u32 req_rate,
- u32 min_pf_rate)
+ u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2471,7 +2650,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
- if (rc == 0)
+ if (!rc)
qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
else
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 343bb0344f62..b6711c106597 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -310,6 +310,26 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 *dst_id);
/**
+ * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
* @brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 6f9d3b831a2a..72eee29c677f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -536,6 +536,247 @@ struct core_conn_context {
struct regpair ustorm_st_padding[2];
};
+enum core_error_handle {
+ LL2_DROP_PACKET,
+ LL2_DO_NOTHING,
+ LL2_ASSERT,
+ MAX_CORE_ERROR_HANDLE
+};
+
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ MAX_CORE_EVENT_OPCODE
+};
+
+enum core_l4_pseudo_checksum_mode {
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+struct core_ll2_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+};
+
+struct core_ll2_rx_prod {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+ __le32 reserved;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START,
+ CORE_RAMROD_TX_QUEUE_START,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+struct core_rx_action_on_error {
+ u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
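The MASK/SHIFT pairs follow the qed convention of packing several sub-fields into one storage unit; the driver manipulates them with its GET_FIELD/SET_FIELD helpers. A standalone restatement of the idea (userspace C; the helper macros below are simplified assumptions, not the kernel ones):

#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(v, mask, shift)	(((v) >> (shift)) & (mask))
#define FIELD_SET(v, mask, shift, val) \
	((v) = ((v) & ~((mask) << (shift))) | (((val) & (mask)) << (shift)))

int main(void)
{
	uint8_t error_type = 0;

	/* PACKET_TOO_BIG action in bits 0-1, NO_BUFF action in bits 2-3 */
	FIELD_SET(error_type, 0x3, 0, 1);	/* LL2_DO_NOTHING */
	FIELD_SET(error_type, 0x3, 2, 0);	/* LL2_DROP_PACKET */
	printf("too_big=%u no_buff=%u raw=0x%02x\n",
	       (unsigned)FIELD_GET(error_type, 0x3, 0),
	       (unsigned)FIELD_GET(error_type, 0x3, 2), error_type);
	return 0;
}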
+
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd;
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+struct core_rx_cqe_opaque_data {
+ __le32 data[2];
+};
+
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_TYPE_REGULAR,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+ CORE_RX_CQE_TYPE_SLOW_PATH,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+struct core_rx_fast_path_cqe {
+ u8 type;
+ u8 placement_offset;
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length;
+ __le16 vlan;
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved[4];
+};
+
+struct core_rx_gsi_offload_cqe {
+ u8 type;
+ u8 data_length_error;
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length;
+ __le16 vlan;
+ __le32 src_mac_addrhi;
+ __le16 src_mac_addrlo;
+ u8 reserved1[2];
+ __le32 gid_dst[4];
+};
+
+struct core_rx_slow_path_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ __le32 reserved1[7];
+};
+
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+ struct core_rx_slow_path_cqe rx_cqe_sp;
+};
+
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base;
+ struct regpair cqe_pbl_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 drop_ttl0_flg;
+ __le16 num_of_pbl_pages;
+ u8 inner_vlan_removal_en;
+ u8 queue_id;
+ u8 main_func_queue;
+ u8 mf_si_bcast_accept_all;
+ u8 mf_si_mcast_accept_all;
+ struct core_rx_action_on_error action_on_error;
+ u8 gsi_offload_flag;
+ u8 reserved[7];
+};
+
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 queue_id;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+struct core_tx_bd_flags {
+ u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
+
+};
+
+struct core_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 nw_vlan_or_lb_echo;
+ u8 bitfield0;
+#define CORE_TX_BD_NBDS_MASK 0xF
+#define CORE_TX_BD_NBDS_SHIFT 0
+#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
+#define CORE_TX_BD_RESERVED0_MASK 0x7
+#define CORE_TX_BD_RESERVED0_SHIFT 5
+ struct core_tx_bd_flags bd_flags;
+ __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_RESERVED1_MASK 0x1
+#define CORE_TX_BD_RESERVED1_SHIFT 15
+};
+
+enum core_tx_dest {
+ CORE_TX_DEST_NW,
+ CORE_TX_DEST_LB,
+ MAX_CORE_TX_DEST
+};
+
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 stats_en;
+ u8 stats_id;
+ u8 conn_type;
+ __le16 pbl_size;
+ __le16 qm_pq_id;
+ u8 gsi_offload_flag;
+ u8 reserved[3];
+};
+
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -636,9 +877,33 @@ struct hsi_fp_ver_struct {
};
/* Mstorm non-triggering VF zone */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR,
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID,
+ VF_ZONE_FUNC_NOT_ENABLED,
+ ETH_PACKET_TOO_SMALL,
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION,
+ ETH_ILLEGAL_INBAND_TAGS,
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP,
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC,
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
/* Mstorm VF zone */
@@ -705,13 +970,17 @@ struct pf_start_ramrod_data {
struct protocol_dcb_data {
u8 dcb_enable_flag;
+ u8 reserved_a;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved;
+ u8 reserved_b;
+ u8 reserved0;
};
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
+ u8 update_rx_def_ucast_clss;
+ u8 update_rx_def_non_ucast_clss;
u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
@@ -727,7 +996,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[3];
+ __le16 reserved[2];
};
struct pf_update_ramrod_data {
@@ -736,16 +1005,17 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag;
u8 update_iscsi_dcb_data_flag;
u8 update_roce_dcb_data_flag;
+ u8 update_rroce_dcb_data_flag;
u8 update_iwarp_dcb_data_flag;
u8 update_mf_vlan_flag;
- u8 reserved;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
+ struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved2;
+ __le16 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -766,10 +1036,14 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
-/* Pstorm non-triggering VF zone */
+struct rdma_sent_stats {
+ struct regpair sent_bytes;
+ struct regpair sent_pkts;
+};
+
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
- struct regpair reserved[2];
+ struct rdma_sent_stats rdma_stats;
};
/* Pstorm VF zone */
@@ -786,7 +1060,11 @@ struct ramrod_header {
__le16 echo;
};
-/* Slowpath Element (SPQE) */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes;
+ struct regpair rcv_pkts;
+};
+
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -794,7 +1072,7 @@ struct slow_path_element {
/* Tstorm non-triggering VF zone */
struct tstorm_non_trigger_vf_zone {
- struct regpair reserved[2];
+ struct rdma_rcv_stats rdma_stats;
};
struct tstorm_per_port_stat {
@@ -802,9 +1080,14 @@ struct tstorm_per_port_stat {
struct regpair mac_error_discard;
struct regpair mftag_filter_discard;
struct regpair eth_mac_filter_discard;
- struct regpair reserved[5];
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair reserved;
+ struct regpair roce_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1[2];
+ struct regpair reserved1;
+ struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
@@ -870,7 +1153,13 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
-/* Attentions status block */
+enum vf_zone_size_mode {
+ VF_ZONE_SIZE_MODE_DEFAULT,
+ VF_ZONE_SIZE_MODE_DOUBLE,
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1442,13 +1731,6 @@ enum bin_dbg_buffer_type {
MAX_BIN_DBG_BUFFER_TYPE
};
-/* Chip IDs */
-enum chip_ids {
- CHIP_RESERVED,
- CHIP_BB_B0,
- CHIP_RESERVED2,
- MAX_CHIP_IDS
-};
/* Attention bit mapping */
struct dbg_attn_bit_mapping {
@@ -1527,6 +1809,371 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
+/* condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* memory data for registers dump */
+struct dbg_dump_mem {
+ __le32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ __le32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
+#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+};
+
+/* register data for registers dump */
+struct dbg_dump_reg {
+ __le32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* split header for registers dump */
+struct dbg_dump_split_hdr {
+ __le32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ __le16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ __le16 num_entries; /* number of registers entries to check */
+ u8 entry_size; /* size of registers entry (in dwords) */
+ u8 start_entry; /* index of the first entry to check */
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ __le16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ __le16 rule_id; /* Failing rule index */
+ __le16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ __le16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ __le16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ __le16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ __le16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ __le32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
+ u8 hw_id; /* HW ID associated with the block */
+ u8 line_num; /* Debug line number to select */
+ u8 right_shift; /* Number of units to right-shift the debug data (0-3) */
+ u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
+ u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
+ u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
+ */
+ u8 reserved;
+};
+
+/* Debug Bus Clients */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ __le32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 fast_enabled;
+ u8 fast_mode;
+ u8 slow_enabled;
+ u8 slow_mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ __le16 reserved;
+ __le32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ __le32 app_version; /* The tools version number of the application */
+ u8 state; /* The current debug bus state */
+ u8 hw_dwords; /* HW dwords per cycle */
+ u8 next_hw_id; /* Next HW ID to be associated with an input */
+ u8 num_enabled_blocks; /* Number of blocks enabled for recording */
+ u8 num_enabled_storms; /* Number of Storms enabled for recording */
+ u8 target; /* Output target */
+ u8 next_trigger_state; /* ID of next trigger state to be added */
+ u8 next_constraint_id; /* ID of next filter/trigger constraint to be
+ * added.
+ */
+ u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
+ u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
+ u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
+ * (0/1).
+ */
+ u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
+ u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
+ u8 adding_filter; /* If true, the next added constraint belongs to the
+ * filter. Otherwise, it belongs to the last added
+ * trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 filter_pre_trigger; /* Indicates if the recording filter should be
+ * applied before the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 filter_post_trigger; /* Indicates if the recording filter should be
+ * applied after the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
+ * Otherwise, each input is assigned a different HW ID
+ * (0/1).
+ */
+ u8 rcv_from_other_engine; /* Indicates if the other engine sends its NW
+ * recording to this engine (0/1).
+ */
+ struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
+ * only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ __le16 reserved;
+ struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
+ DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
+ * recording.
+ */
+ DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
+ DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ /* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ /* records debug bus to the NW */
+ DBG_BUS_TARGET_ID_NIG,
+ /* records debug bus to a PCI buffer */
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ __le32 param_val[40]; /* Value of each GRC parameter. Array size must
+ * match the enum dbg_grc_params.
+ */
+ u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
+ * set by the user (0/1). Array size must
+ * match the enum dbg_grc_params.
+ */
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
+ DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
+ DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
+ DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
+ DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
+ DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
+ DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
+ DBG_GRC_PARAM_RESERVED, /* reserved */
+ DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DMAE, /* dump DMAE memories (0/1) */
+ DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
+ DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
+ DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
+ DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
+ DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
+ DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
+ /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ /* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+ /* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug reset registers */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -1579,9 +2226,45 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
MAX_DBG_STATUS
};
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ __le32 buf_size; /* Idle check buffer size in dwords */
+ u8 buf_size_set; /* Indicates if the idle check buffer size was set
+ * (0/1).
+ */
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc; /* GRC Dump data */
+ struct dbg_bus_data bus; /* Debug Bus data */
+ struct idle_chk_data idle_chk; /* Idle Check data */
+ u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
+ u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ */
+ u8 chip_id; /* Chip ID (from enum chip_ids) */
+ u8 platform_id; /* Platform ID (from enum platform_ids) */
+ u8 initialized; /* Indicates if the data was initialized */
+ u8 reserved;
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -1589,7 +2272,41 @@ enum dbg_status {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
-/* QM per-port init parameters */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc;
+ __le32 headroom_per_tc;
+ __le32 min_pkt_size;
+ __le32 max_ports_per_engine;
+ u8 num_active_tcs[MAX_NUM_PORTS];
+};
+
+struct init_ets_tc_req {
+ u8 use_sp;
+ u8 use_wfq;
+ __le16 weight;
+};
+
+struct init_ets_req {
+ __le32 mtu;
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+struct init_nig_lb_rl_req {
+ __le16 lb_mac_rate;
+ __le16 lb_rate;
+ __le32 mtu;
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id;
+ u8 valid;
+};
+
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -1619,7 +2336,7 @@ struct init_qm_vport_params {
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1)
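Worked out: with GRC_ADDR_BITS = 23, MAX_GRC_ADDR = BIT(23) - 1 = 0x7fffff. Since GRC addresses are expressed in dwords, the 23-bit address space covers 2^23 * 4 bytes = 32 MiB of register space.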
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
@@ -1627,15 +2344,50 @@ struct init_qm_vport_params {
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+struct fw_asserts_ram_section {
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
+ u8 list_dword_offset;
+ u8 list_element_dword_size;
+ u8 list_num_elements;
+ u8 list_next_index_dword_offset;
+};
+
+struct fw_ver_num {
+ u8 major; /* Firmware major version number */
+ u8 minor; /* Firmware minor version number */
+ u8 rev; /* Firmware revision version number */
+ u8 eng; /* Firmware engineering version number (for bootleg versions) */
+};
+
+struct fw_ver_info {
+ __le16 tools_ver; /* Tools version number */
+ u8 image_id; /* FW image ID (e.g. main) */
+ u8 reserved1;
+ struct fw_ver_num num; /* FW version number */
+ __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver;
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+struct fw_info_location {
+ __le32 grc_addr;
+ __le32 size;
+};
+
enum init_modes {
MODE_RESERVED,
MODE_BB_B0,
- MODE_RESERVED2,
+ MODE_K2,
MODE_ASIC,
+ MODE_RESERVED2,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
- MODE_RESERVED6,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -1644,7 +2396,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_40G,
- MODE_RESERVED7,
+ MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -1674,11 +2426,11 @@ struct bin_buffer_hdr {
/* binary init buffer types */
enum bin_init_buffer_type {
- BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
- BIN_BUF_IRO,
+ BIN_BUF_INIT_IRO,
MAX_BIN_INIT_BUFFER_TYPE
};
@@ -1902,8 +2654,276 @@ struct iro {
__le16 size;
};
+/***************************** Public Functions *******************************/
/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the collected GRC data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified dump buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
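These dump entry points come in size/dump pairs: query the needed dword count, allocate, then dump into the caller-supplied buffer. A hedged sketch of the calling pattern — the allocation strategy and the demo function itself are assumptions, not taken from this patch:

/* Illustrative caller only. */
static int demo_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 size_dw, dumped_dw;
	u32 *buf;

	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
	    DBG_STATUS_OK)
		return -EINVAL;

	buf = vzalloc((size_t)size_dw * sizeof(u32));
	if (!buf)
		return -ENOMEM;

	if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw) !=
	    DBG_STATUS_OK) {
		vfree(buf);
		return -EINVAL;
	}

	/* ... hand buf / dumped_dw to whatever consumes the dump ... */
	vfree(buf);
	return 0;
}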
+/**
+ * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
+ * for idle check results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the idle check data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the mcp trace data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * - the trace meta data cannot be read (from NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
+ * for grc trace fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the reg fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the IGU fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
+ * buffer size for protection override window results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for protection
+ * override data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_protection_override_dump - Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the protection override data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the FW Asserts data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_print_attn - Prints attention register values in the
+ * specified results struct.
*
* @param p_hwfn
* @param results - Pointer to the attention read results
@@ -1915,47 +2935,241 @@ struct iro {
enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
+/******************************** Constants **********************************/
+
#define MAX_NAME_LEN 16
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_get_status_str - Returns a string for the specified status.
+ *
+ * @param status - a debug status code.
+ *
+ * @return a string for the specified status
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+/**
+ * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_idle_chk_results - Prints idle check results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the idle check results.
+ * @param num_errors - OUT: number of errors found in idle check.
+ * @param num_warnings - OUT: number of warnings found in idle check.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
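The parsing side mirrors the dump side: size the text buffer first, then render into it. A hedged sketch with an invented caller:

/* Illustrative caller only. */
static int demo_print_idle_chk(struct qed_hwfn *p_hwfn,
			       u32 *dump_buf, u32 dumped_dw)
{
	u32 text_size, num_errors, num_warnings;
	char *text;

	if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf, dumped_dw,
					      &text_size) != DBG_STATUS_OK)
		return -EINVAL;

	text = vzalloc(text_size);
	if (!text)
		return -ENOMEM;

	if (qed_print_idle_chk_results(p_hwfn, dump_buf, dumped_dw, text,
				       &num_errors, &num_warnings) ==
	    DBG_STATUS_OK)
		pr_info("idle check: %u errors, %u warnings\n",
			num_errors, num_warnings);	/* 'text' holds the rendered results */

	vfree(text);
	return 0;
}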
+/**
+ * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - MCP Trace dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_mcp_trace_results - Prints MCP Trace results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_reg_fifo_results - Prints reg fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the reg fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_igu_fifo_results - Prints IGU fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the IGU fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_protection_override_results_buf_size - Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_protection_override_results - Prints protection override
+ * results.
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the protection override results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_fw_asserts_results - Prints FW Asserts results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the FW Asserts results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
/* Win 2 */
-#define GTT_BAR0_MAP_REG_IGU_CMD \
- 0x00f000UL
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-#define GTT_BAR0_MAP_REG_TSDM_RAM \
- 0x010000UL
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM \
- 0x011000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
- 0x012000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM \
- 0x013000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
- 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
- 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM \
- 0x016000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM \
- 0x017000UL
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM \
- 0x018000UL
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -2003,7 +3217,7 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u8 start_vport;
u8 num_vports;
- u8 pf_wfq;
+ u16 pf_wfq;
u32 pf_rl;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
@@ -2138,6 +3352,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
@@ -2153,42 +3370,90 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[21].base + ((pf_id) * IRO[21].m1))
+ (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+ (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[23].base + ((pf_id) * IRO[23].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+ (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[25].base + ((pf_id) * IRO[25].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+ (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[26].base + ((ethtype) * IRO[26].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+ (IRO[27].base + ((ethtype) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[29].base + ((queue_id) * IRO[29].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
-
-static const struct iro iro_arr[46] = {
+ (IRO[30].base + ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+
+static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x78, 0x0, 0x0, 0x78},
{0x6318, 0x20, 0x0, 0x0, 0x20},
@@ -2201,20 +3466,21 @@ static const struct iro iro_arr[46] = {
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x4990, 0x0, 0x0, 0x0, 0x78},
{0x7e48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{0x60f8, 0x10, 0x0, 0x0, 0x10},
{0xb820, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xc9a8, 0x0, 0x0, 0x0, 0x4},
- {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x53a0, 0x80, 0x4, 0x0, 0x4},
+ {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba0, 0x80, 0x0, 0x0, 0x20},
{0x8050, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0xf188, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xacf0, 0x0, 0x0, 0x0, 0xf0},
{0xade0, 0x8, 0x0, 0x0, 0x8},
@@ -2226,455 +3492,457 @@ static const struct iro iro_arr[46] = {
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
{0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12120, 0x10, 0x0, 0x0, 0x8},
- {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0x12c38, 0x10, 0x0, 0x0, 0x8},
+ {0x11aa0, 0x38, 0x0, 0x0, 0x18},
{0xa8c0, 0x30, 0x0, 0x0, 0x10},
{0x86f8, 0x28, 0x0, 0x0, 0x18},
- {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
- {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5000, 0x10, 0x0, 0x0, 0x10},
};
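For reference, each iro_arr entry supplies the values the *_OFFSET and *_SIZE
macros above expand against; assuming the field order {base, m1, m2, m3, size},
a worked example for one of the new macros:

/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) = IRO[2].base + (port_id) * IRO[2].m1.
 * With IRO[2] = {0x6318, 0x20, 0x0, 0x0, 0x20}:
 *   port 0 -> 0x6318, port 1 -> 0x6338; each port's stat block is 0x20 bytes.
 */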
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30799
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30815
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30849
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31523
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32547
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33059
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
-
-#define RUNTIME_ARRAY_SIZE 33925
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30817
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30851
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+
+#define RUNTIME_ARRAY_SIZE 33927
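These *_RT_OFFSET values index a host-side staging array of RUNTIME_ARRAY_SIZE
dwords that is written to the chip in one pass during init. An illustrative
sketch only, assuming STORE_RT_REG() is the driver's existing staging helper:

/* Stage a runtime-array entry; it is flushed to hardware later in init. */
STORE_RT_REG(p_hwfn, DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 1);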
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -3201,7 +4469,31 @@ struct eth_conn_context {
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
-/* opcodes for the event ring */
+enum eth_error_code {
+ ETH_OK = 0x00,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ MAX_ETH_ERROR_CODE
+};
+
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -3269,7 +4561,13 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
-/* Ethernet Ramrod Command IDs */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG,
+ ETH_IPV4_FIRST_FRAG,
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -3451,8 +4749,8 @@ struct rx_queue_start_ramrod_data {
u8 toggle_val;
u8 vf_rx_prod_index;
-
- u8 reserved[6];
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
__le16 reserved1;
struct regpair cqe_pbl_addr;
struct regpair bd_base;
@@ -3526,10 +4824,11 @@ struct tx_queue_start_ramrod_data {
__le16 pxp_st_index;
__le16 comp_agg_size;
__le16 queue_zone_id;
- __le16 test_dup_count;
+ __le16 reserved2;
__le16 pbl_size;
__le16 tx_queue_id;
-
+ __le16 same_as_last_id;
+ __le16 reserved[3];
struct regpair pbl_base_addr;
struct regpair bd_cons_address;
};
@@ -4926,8 +6225,8 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
@@ -4988,6 +6287,10 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+};
+
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
@@ -6639,6 +7942,35 @@ struct ystorm_iscsi_conn_ag_ctx {
__le32 reg2;
__le32 reg3;
};
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Helps to identify that the trace is valid */
+ u32 size; /* the size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32 bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
+
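As a hedged illustration of the entry layout the MFW_TRACE_* masks above
describe (event id in the low 16 bits, parameter size in the next 4), a
decoder for one 3-byte entry header might look like this; payload format and
ring-wrap handling are firmware-defined and elided:

static void mcp_trace_parse_hdr(const u8 *buf, u32 offset,
				u32 *event_id, u32 *prm_size)
{
	/* MFW_TRACE_ENTRY_SIZE (3) header bytes, assumed little-endian */
	u32 hdr = buf[offset] | (buf[offset + 1] << 8) |
		  (buf[offset + 2] << 16);

	*event_id = hdr & MFW_TRACE_EVENTID_MASK;
	*prm_size = (hdr & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT;
}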
#define VF_MAX_STATIC 192
#define MCP_GLOB_PATH_MAX 2
@@ -6646,6 +7978,7 @@ struct ystorm_iscsi_conn_ag_ctx {
#define MCP_GLOB_PORT_MAX 4
#define MCP_GLOB_FUNC_MAX 16
+typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
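A small hedged example of decoding an offsize_t: the low 16 bits carry a dword
offset from the scratchpad base per the masks above (the matching size field
sits in the high 16 bits, outside this hunk):

static u32 offsize_to_bytes(offsize_t offsize)
{
	/* Offset is stored in dwords; convert to a byte offset. */
	return ((offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) * 4;
}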
@@ -7236,8 +8569,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_MCP_HALT 0x00100000
+
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
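A usage sketch only for the new GET_STATS command, with the statistics type
passed as the mailbox parameter; qed_mcp_cmd() is assumed to be the driver's
existing mailbox helper, and response handling is elided:

u32 mcp_resp = 0, mcp_param = 0;
int rc;

/* Ask the MFW to collect LAN statistics. */
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
		 DRV_MSG_CODE_STATS_TYPE_LAN, &mcp_resp, &mcp_param);
if (rc)
	DP_NOTICE(p_hwfn, "GET_STATS mailbox command failed\n");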
@@ -7248,6 +8592,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
@@ -7285,6 +8632,8 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -7315,10 +8664,10 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
MFW_DRV_MSG_BW_UPDATE5,
- MFW_DRV_MSG_BW_UPDATE6,
- MFW_DRV_MSG_BW_UPDATE7,
- MFW_DRV_MSG_BW_UPDATE8,
- MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
@@ -7521,4 +8870,101 @@ struct nvm_cfg1 {
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define MCP_TRACE_SIZE 2048 /* 2 KB */
+
+/* This section is located at a fixed position at the beginning of the
+ * scratchpad, to ensure that the MCP trace is not overrun during an MFW
+ * upgrade. All the rest of the data has a floating location which differs
+ * from version to version, and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is
+ * loaded with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ struct mcp_trace trace;
+#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
+ u8 trace_buffer[MCP_TRACE_SIZE];
+#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
+ /* running_mfw has the same definition as in nvm_map.h.
+ * This bit indicates both the running dir and the running bundle.
+ * It is set once when the LIM is loaded.
+ */
+ u32 running_mfw;
+#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
+ u32 build_time;
+#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
+ u32 reset_type;
+#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
+ u32 mfw_secure_mode;
+#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
+ u16 pme_status_pf_bitmap;
+#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
+ u16 pme_enable_pf_bitmap;
+#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
+ u32 mim_nvm_addr;
+ u32 mim_start_addr;
+ u32 ah_pcie_link_params;
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
+#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
+
+ u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_MAX,
+};
+
+#define DIR_ID_1 (0)
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index e17885321faf..6e4fae9b1430 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -44,8 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
- GFP_KERNEL);
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
int i;
if (!p_pool)
@@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
return NULL;
}
-void qed_ptt_release(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
-u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
 /* The HW uses DWORDS and we need to translate it to bytes */
return le32_to_cpu(p_ptt->pxp.offset) << 2;
@@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
}
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 new_hw_addr)
+ struct qed_ptt *p_ptt, u32 new_hw_addr)
{
u32 prev_hw_addr;
@@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
}
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr)
+ struct qed_ptt *p_ptt, u32 hw_addr)
{
u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 offset;
@@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn,
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *addr,
- u32 hw_addr,
- size_t n,
- bool to_device)
+ void *addr, u32 hw_addr, size_t n, bool to_device)
{
u32 dw_count, *host_addr, hw_offset;
size_t quota, done = 0;
@@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- void *dest, u32 hw_addr, size_t n)
+ struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
@@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr, void *src, size_t n)
+ struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
@@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
-void qed_fid_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 fid)
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
u16 control = 0;
@@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
}
void qed_port_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 port_id)
+ struct qed_ptt *p_ptt, u8 port_id)
{
u16 control = 0;
@@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
-void qed_port_unpretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u16 control = 0;
@@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static int
-qed_dmae_post_command(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
int qed_status = 0;
/* verify address is not NULL */
- if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
- ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
DP_NOTICE(p_hwfn,
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
- idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ idx_cmd,
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
return -EINVAL;
}
@@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
NETIF_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
 /* Copy the command to DMAE - need to do it before every call
 * since the source/dest addresses are not reset.
@@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
*/
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
- *(((u32 *)command) + i) : 0;
+ *(((u32 *)p_command) + i) : 0;
qed_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
@@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
(i * sizeof(u32)), data);
}
- qed_wr(p_hwfn, p_ptt,
- qed_dmae_idx_to_go_cmd(idx_cmd),
- DMAE_GO_VALUE);
+ qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
return qed_status;
}
@@ -498,31 +481,23 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(u32),
- p_addr,
- GFP_KERNEL);
- if (!*p_comp) {
- DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ sizeof(u32), p_addr, GFP_KERNEL);
+ if (!*p_comp)
goto err;
- }
p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
p_addr, GFP_KERNEL);
- if (!*p_cmd) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ if (!*p_cmd)
goto err;
- }
p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32) * DMAE_MAX_RW_SIZE,
p_addr, GFP_KERNEL);
- if (!*p_buff) {
- DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ if (!*p_buff)
goto err;
- }
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
@@ -543,8 +518,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32),
- p_hwfn->dmae_info.p_completion_word,
- p_phys);
+ p_hwfn->dmae_info.p_completion_word, p_phys);
p_hwfn->dmae_info.p_completion_word = NULL;
}
@@ -552,8 +526,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
- p_hwfn->dmae_info.p_dmae_cmd,
- p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd, p_phys);
p_hwfn->dmae_info.p_dmae_cmd = NULL;
}
@@ -571,9 +544,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
- u32 wait_cnt = 0;
- u32 wait_cnt_limit = 10000;
-
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
int qed_status = 0;
barrier();
@@ -606,7 +577,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
u64 dst_addr,
u8 src_type,
u8 dst_type,
- u32 length)
+ u32 length_dw)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -624,7 +595,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(uintptr_t)src_addr,
- length * sizeof(u32));
+ length_dw * sizeof(u32));
break;
default:
return -EINVAL;
@@ -645,7 +616,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length_dw = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length_dw);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -654,16 +625,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
- src_addr,
- dst_addr,
- length);
+ src_addr, dst_addr, length_dw);
return qed_status;
}
if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
memcpy((void *)(uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
- length * sizeof(u32));
+ length_dw * sizeof(u32));
return 0;
}
@@ -730,10 +699,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status,
- src_addr,
- dst_addr,
- length_cur);
+ qed_status, src_addr, dst_addr, length_cur);
break;
}
}
@@ -743,10 +709,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u64 source_addr,
- u32 grc_addr,
- u32 size_in_dwords,
- u32 flags)
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
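
As the length_dw rename above emphasizes, every size in this DMAE API is counted in dwords, not bytes. A small caller sketch, using the qed_dmae_host2grc() signature from the hunk above, with the buffer and GRC address as illustrative placeholders:

/* Sketch: push a host buffer into GRC space via the DMAE engine. The
 * host virtual address is passed as a u64, matching the
 * QED_DMAE_ADDRESS_HOST_VIRT handling in the sub-operation above.
 */
static int qed_copy_buf_to_grc(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 *buf, u32 grc_addr, u32 num_dwords)
{
	return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)buf,
				 grc_addr, num_dwords, 0);
}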
@@ -768,9 +731,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
-int
-qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -791,12 +755,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
return rc;
}
-int
-qed_dmae_host2host(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- dma_addr_t source_addr,
- dma_addr_t dest_addr,
- u32 size_in_dwords, struct qed_dmae_params *p_params)
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
{
int rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 9866a20d2128..d567ba94c8d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
p_hwfn->rt_data.b_valid[i] = false;
}
-void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
- u32 rt_offset,
- u32 val)
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
- u32 rt_offset, u32 *p_val,
- size_t size)
+ u32 rt_offset, u32 *p_val, size_t size)
{
size_t i;
@@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
static int qed_init_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u16 rt_offset,
- u16 size,
- bool b_must_dmae)
+ u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
@@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
- qed_wr(p_hwfn, p_ptt, addr + (i << 2),
- p_init_val[i]);
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
continue;
}
@@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
- if (rc != 0)
+ if (rc)
return rc;
/* Jump over the entire segment, including invalid entry */
@@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
- addr, fill_count,
- QED_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
u32 i;
@@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
- bool b_must_dmae,
- bool b_can_dmae)
+ bool b_must_dmae, bool b_can_dmae)
{
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
- u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+
u32 offset, output_len, input_len, max_size;
struct qed_dev *cdev = p_hwfn->cdev;
union init_array_hdr *hdr;
@@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
array_data = cdev->fw_data->arr_data;
- hdr = (union init_array_hdr *)(array_data +
- dmae_array_offset);
+ hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
data = le32_to_cpu(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
@@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct init_write_op *cmd,
- bool b_can_dmae)
+ struct init_write_op *p_cmd, bool b_can_dmae)
{
- u32 data = le32_to_cpu(cmd->data);
- u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 data = le32_to_cpu(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- union init_write_args *arg = &cmd->args;
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ union init_write_args *arg = &p_cmd->args;
int rc = 0;
/* Sanitize */
@@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
- qed_wr(p_hwfn, p_ptt, addr,
- le32_to_cpu(arg->inline_val));
+ data = le32_to_cpu(p_cmd->args.inline_val);
+ qed_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
- if (b_must_dmae ||
- (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ data = le32_to_cpu(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
else
- qed_init_fill(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
- rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
@@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_read_op *cmd)
+ struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = QED_INIT_POLL_PERIOD_US, val;
@@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
- u16 *offset,
- int modes)
+ u16 *p_offset, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = cdev->fw_data->modes_tree_buf;
- tree_val = modes_tree_buf[(*offset)++];
+ tree_val = modes_tree_buf[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
- return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
- return (modes & (1 << tree_val)) ? 1 : 0;
+ return (modes & BIT(tree_val)) ? 1 : 0;
}
}
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
- struct init_if_mode_op *p_cmd,
- int modes)
+ struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
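
qed_init_cmd_mode_match() above walks a prefix-encoded boolean expression: NOT/OR/AND opcodes consume their operand subtrees recursively, and any other byte names a mode bit to test against the mask. A standalone restatement of the same evaluator, for illustration only:

/* Mirror of the recursive modes-tree evaluation above; opcodes below
 * MAX_INIT_MODE_OPS are operators, everything else is a mode number.
 * Explicit temporaries keep the subtree parse order well defined.
 */
static u8 eval_modes_tree(const u8 *tree, u16 *offset, int modes)
{
	u8 op = tree[(*offset)++];
	u8 arg1, arg2;

	switch (op) {
	case INIT_MODE_OP_NOT:
		return eval_modes_tree(tree, offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = eval_modes_tree(tree, offset, modes);
		arg2 = eval_modes_tree(tree, offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = eval_modes_tree(tree, offset, modes);
		arg2 = eval_modes_tree(tree, offset, modes);
		return arg1 & arg2;
	default:
		return (modes & BIT(op - MAX_INIT_MODE_OPS)) ? 1 : 0;
	}
}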
@@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
- u32 phase,
- u32 phase_id)
+ u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
u32 op_data = le32_to_cpu(p_cmd->op_data);
@@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
}
int qed_init_run(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int phase,
- int phase_id,
- int modes)
+ struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
@@ -483,10 +460,8 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
init_ops = cdev->fw_data->init_ops;
p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
- if (!p_hwfn->unzip_buf) {
- DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ if (!p_hwfn->unzip_buf)
return -ENOMEM;
- }
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
union init_op *cmd = &init_ops[cmd_num];
@@ -557,7 +532,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
/* First Dword contains metadata and should be skipped */
buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
- offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 8fa50fa23c8d..2adedc6fb6cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1775,10 +1775,9 @@ struct qed_sb_attn_info {
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
- struct qed_sb_attn_info *p_sb_desc)
+ struct qed_sb_attn_info *p_sb_desc)
{
- u16 rc = 0;
- u16 index;
+ u16 rc = 0, index;
 /* Make certain HW write took effect */
mmiowb();
@@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
* @param asserted_bits newly asserted bits
* @return int
*/
-static int qed_int_assertion(struct qed_hwfn *p_hwfn,
- u16 asserted_bits)
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
u32 igu_mask;
/* Mask the source of the attention in the IGU */
- igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
@@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if ((p_bit->flags & ATTENTION_PARITY) &&
- !!(parities & (1 << bit_idx)))
+ !!(parities & BIT(bit_idx)))
qed_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
- aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
@@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn,
- "MFW indication via attention\n");
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
@@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
return rc;
}
- if (deasserted_bits) {
+ if (deasserted_bits)
rc = qed_int_deassertion(p_hwfn, deasserted_bits);
- if (rc)
- return rc;
- }
return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
- void __iomem *igu_addr,
- u32 ack_cons)
+ void __iomem *igu_addr, u32 ack_cons)
{
struct igu_prod_cons_update igu_ack = { 0 };
@@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
/* Gather Interrupts/Attentions information */
if (!sb_info->sb_virt) {
- DP_ERR(
- p_hwfn->cdev,
- "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
@@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
}
if (!sb_attn || !sb_attn->sb_attn) {
- DP_ERR(
- p_hwfn->cdev,
- "Attentions Status block is NULL - cannot check for new attentions!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
} else {
u16 tmp_index = sb_attn->index;
@@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
if (p_sb->sb_attn)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
+ p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
}
@@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
int i, j, k;
@@ -2378,15 +2365,13 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_sb_attn_info *p_sb;
- void *p_virt;
dma_addr_t p_phys = 0;
+ void *p_virt;
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
- if (!p_sb) {
- DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ if (!p_sb)
return -ENOMEM;
- }
/* SB ring */
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
@@ -2394,7 +2379,6 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
&p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
kfree(p_sb);
return -ENOMEM;
}
@@ -2412,9 +2396,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
- u8 pf_id,
- u16 vf_number,
- u8 vf_valid)
+ u8 pf_id, u16 vf_number, u8 vf_valid)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
@@ -2428,12 +2410,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
- /* setting the time resolution to a fixed value (= 1) */
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
- QED_CAU_DEF_RX_TIMER_RES);
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
- QED_CAU_DEF_TX_TIMER_RES);
-
cau_state = CAU_HC_DISABLE_STATE;
if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
@@ -2468,9 +2444,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
- u16 igu_sb_id,
- u16 vf_number,
- u8 vf_valid)
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
struct cau_sb_entry sb_entry;
@@ -2514,8 +2488,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
timer_res = 2;
timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
- QED_COAL_RX_STATE_MACHINE,
- timeset);
+ QED_COAL_RX_STATE_MACHINE, timeset);
if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
timer_res = 0;
@@ -2541,8 +2514,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u8 timeset)
{
struct cau_pi_entry pi_entry;
- u32 sb_offset;
- u32 pi_offset;
+ u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
@@ -2569,8 +2541,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_sb_info *sb_info)
+ struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
@@ -2590,8 +2561,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
*
* @return u16
*/
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
- u16 sb_id)
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -2603,8 +2573,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
- (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+ if (sb_id == QED_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
return igu_sb_id;
}
@@ -2612,9 +2586,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
sb_info->sb_virt = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
@@ -2650,8 +2622,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
if (sb_id == QED_SP_SB_ID) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
@@ -2685,8 +2656,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
kfree(p_sb);
}
-static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_sb_sp_info *p_sb;
dma_addr_t p_phys = 0;
@@ -2694,17 +2664,14 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
- if (!p_sb) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ if (!p_sb)
return -ENOMEM;
- }
/* SB ring */
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
SB_ALIGNED_SIZE(p_hwfn),
&p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
kfree(p_sb);
return -ENOMEM;
}
@@ -2721,9 +2688,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
- void *cookie,
- u8 *sb_idx,
- __le16 **p_fw_cons)
+ void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
int rc = -ENOMEM;
@@ -2764,8 +2729,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
@@ -2809,7 +2773,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn);
- if (rc != 0) {
+ if (rc) {
DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
return -EINVAL;
}
@@ -2822,8 +2786,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
-void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->b_int_enabled = 0;
@@ -2950,13 +2913,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid, b_set);
}
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 sb_id)
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 sb_id)
{
u32 val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY +
- sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
struct qed_igu_block *p_block;
p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2983,8 +2944,7 @@ out:
return val;
}
-int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
@@ -2993,7 +2953,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
-
if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM;
@@ -3104,22 +3063,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
*/
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
- u32 igu_pf_conf = 0;
-
- igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
- u64 intr_status = 0;
- u32 intr_status_lo = 0;
- u32 intr_status_hi = 0;
u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
IGU_CMD_INT_ACK_BASE;
u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
IGU_CMD_INT_ACK_BASE;
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
intr_status_lo = REG_RD(p_hwfn,
GTT_BAR0_MAP_REG_IGU_CMD +
@@ -3153,26 +3109,20 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
kfree(p_hwfn->sp_dpc);
}
-int qed_int_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
rc = qed_int_sp_dpc_alloc(p_hwfn);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ if (rc)
return rc;
- }
+
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ if (rc)
return rc;
- }
+
rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
- return rc;
- }
+
return rc;
}
@@ -3183,8 +3133,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
qed_int_sp_dpc_free(p_hwfn);
}
-void qed_int_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
qed_int_sb_attn_setup(p_hwfn, p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 401e738543b5..ddd410a91e13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
u16 rx_mode = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
/* TPA related fields */
- memset(&p_ramrod->tpa_param, 0,
- sizeof(struct eth_vport_tpa_param));
+ memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
@@ -102,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->tx_switching_en = p_params->tx_switching;
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -109,8 +111,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
@@ -306,14 +308,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- if (p_params->update_approx_mcast_flg) {
- p_ramrod->common.update_approx_mcast_flg = 1;
- for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
- __le32 val = cpu_to_le32(p_bins[i]);
+ if (!p_params->update_approx_mcast_flg)
+ return;
- p_ramrod->approx_mcast.bins[i] = val;
- }
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
}
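
The bins above form the device's approximate multicast filter: each multicast MAC hashes to one bit of a 256-bit array, packed into ETH_MULTICAST_MAC_BINS_IN_REGS 32-bit words. A sketch of marking one MAC in that array follows; taking the bin as the low byte of a zero-seeded CRC32c (via the qed_crc32c_le() helper further down this diff) is an assumption.

/* Sketch: set the filter bit for one multicast MAC. The bin selection
 * (low byte of a zero-seeded CRC32c over the MAC) is an assumption;
 * the bit packing matches the conversion loop above.
 */
static void qed_set_mcast_bin(unsigned long *bins, u8 *mac)
{
	u32 bin = qed_crc32c_le(0, mac, ETH_ALEN) & 0xff;

	__set_bit(bin, bins);
}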
@@ -336,7 +338,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -361,8 +363,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
- p_cmn->update_accept_any_vlan_flg =
- p_params->update_accept_any_vlan_flg;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
@@ -411,7 +413,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
return qed_vf_pf_vport_stop(p_hwfn);
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -476,7 +478,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
return rc;
}
@@ -511,11 +513,12 @@ static int qed_sp_release_queue_cid(
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -526,23 +529,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
int rc = -EINVAL;
/* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = params->vport_id;
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = p_params->vport_id;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
+ if (rc)
return rc;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, params->queue_id, params->vport_id,
- params->sb);
+ opaque_fid,
+ cid, p_params->queue_id, p_params->vport_id, p_params->sb);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -558,24 +561,28 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(params->sb);
- p_ramrod->sb_index = params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
- p_ramrod->complete_cqe_flg = 0;
- p_ramrod->complete_event_flg = 1;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
- p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
- p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- p_ramrod->vf_rx_prod_index = params->vf_qid;
- if (params->vf_qid)
+ if (p_params->vf_qid || b_use_zone_a_prod) {
+ p_ramrod->vf_rx_prod_index = p_params->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_use_zone_a_prod ? " [legacy]" : "",
+ p_params->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ }
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -583,7 +590,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -597,20 +604,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_rxq_start(p_hwfn,
- params->queue_id,
- params->sb,
- params->sb_idx,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
}
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
+ if (rc)
return rc;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
return rc;
*pp_prod = (u8 __iomem *)p_hwfn->regview +
@@ -622,9 +629,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -634,14 +640,13 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
opaque_fid,
p_rx_cid->cid,
- params,
+ p_params,
abs_stats_id,
bd_max_bytes,
bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size);
+ cqe_pbl_addr, cqe_pbl_size, false);
- if (rc != 0)
+ if (rc)
qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
return rc;
@@ -788,21 +793,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_params->sb);
- p_ramrod->sb_index = p_params->sb_idx;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
- p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
- p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = qed_get_qm_pq(p_hwfn,
- PROTOCOLID_ETH,
- p_pq_params);
- p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -836,8 +840,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
memset(&pq_params, 0, sizeof(pq_params));
/* Allocate a CID for the queue */
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_tx_cid->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -896,8 +899,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}
-static enum eth_filter_action
-qed_filter_action(enum qed_filter_opcode opcode)
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
@@ -1033,19 +1035,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
- p_second_filter->type = p_first_filter->type;
- p_second_filter->mac_msb = p_first_filter->mac_msb;
- p_second_filter->mac_mid = p_first_filter->mac_mid;
- p_second_filter->mac_lsb = p_first_filter->mac_lsb;
- p_second_filter->vlan_id = p_first_filter->vlan_id;
- p_second_filter->vni = p_first_filter->vni;
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
p_first_filter->vport_id = vport_to_remove_from;
- p_second_filter->action = ETH_FILTER_ACTION_ADD;
- p_second_filter->vport_id = vport_to_add_to;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
p_first_filter->vport_id = vport_to_add_to;
memcpy(p_second_filter, p_first_filter,
@@ -1086,7 +1088,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
&p_ramrod, &p_ent,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
return rc;
}
@@ -1094,10 +1096,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
p_header->assert_on_error = p_filter_cmd->assert_on_error;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc != 0) {
- DP_ERR(p_hwfn,
- "Unicast filter ADD command failed %d\n",
- rc);
+ if (rc) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
return rc;
}
@@ -1136,15 +1136,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
* Return:
******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length,
- u32 crc32_seed,
- u8 complement)
+ u32 crc32_length, u32 crc32_seed, u8 complement)
{
- u32 byte = 0;
- u32 bit = 0;
- u8 msb = 0;
- u8 current_byte = 0;
- u32 crc32_result = crc32_seed;
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
if ((!crc32_packet) ||
(crc32_length == 0) ||
@@ -1164,9 +1159,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static inline u32 qed_crc32c_le(u32 seed,
- u8 *mac,
- u32 len)
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
u32 packet_buf[2] = { 0 };
@@ -1196,17 +1189,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc, i;
- if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ if (p_filter_cmd->opcode == QED_FILTER_ADD)
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
&abs_vport_id);
- if (rc)
- return rc;
- } else {
+ else
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
&abs_vport_id);
- if (rc)
- return rc;
- }
+ if (rc)
+ return rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1244,11 +1234,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 /* Convert to correct endianness */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
u32 *p_bins = (u32 *)bins;
- struct vport_update_ramrod_mcast *approx_mcast;
- approx_mcast = &p_ramrod->approx_mcast;
- approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
}
}
@@ -1286,8 +1276,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_mcast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
+ comp_mode, p_comp_data);
}
return rc;
}
@@ -1314,9 +1303,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_ucast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
- if (rc != 0)
+ comp_mode, p_comp_data);
+ if (rc)
break;
}
@@ -1590,8 +1578,7 @@ out:
}
}
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
@@ -1698,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+ info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
}
qed_fill_dev_info(cdev, &info->common);
@@ -1766,8 +1755,7 @@ static int qed_start_vport(struct qed_dev *cdev,
return 0;
}
-static int qed_stop_vport(struct qed_dev *cdev,
- u8 vport_id)
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
int rc, i;
@@ -1775,8 +1763,7 @@ static int qed_stop_vport(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- vport_id);
+ p_hwfn->hw_info.opaque_fid, vport_id);
if (rc) {
DP_ERR(cdev, "Failed to stop VPORT\n");
@@ -1801,10 +1788,8 @@ static int qed_update_vport(struct qed_dev *cdev,
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg =
- params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg =
- params->update_vport_active_flg;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
@@ -1817,8 +1802,7 @@ static int qed_update_vport(struct qed_dev *cdev,
* We need to re-fix the rss values per engine for CMT.
*/
if (cdev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss =
- &params->rss_params;
+ struct qed_update_vport_rss_params *rss = &params->rss_params;
int k, max = 0;
/* Find largest entry, since it's possible RSS needs to
@@ -1861,8 +1845,8 @@ static int qed_update_vport(struct qed_dev *cdev,
QED_RSS_IND_TABLE_SIZE * sizeof(u16));
memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
QED_RSS_KEY_SIZE * sizeof(u32));
+ sp_params.rss_params = &sp_rss_params;
}
- sp_params.rss_params = &sp_rss_params;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1893,8 +1877,8 @@ static int qed_start_rxq(struct qed_dev *cdev,
u16 cqe_pbl_size,
void __iomem **pp_prod)
{
- int rc, hwfn_index;
struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
hwfn_index = params->rss_id % cdev->num_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
@@ -1935,8 +1919,7 @@ static int qed_stop_rxq(struct qed_dev *cdev,
rc = qed_sp_eth_rx_queue_stop(p_hwfn,
params->rx_queue_id / cdev->num_hwfns,
- params->eq_completion_only,
- false);
+ params->eq_completion_only, false);
if (rc) {
DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
return rc;
@@ -2047,11 +2030,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
memset(&accept_flags, 0, sizeof(accept_flags));
- accept_flags.update_rx_mode_config = 1;
- accept_flags.update_tx_mode_config = 1;
- accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
- QED_ACCEPT_MCAST_MATCHED |
- QED_ACCEPT_BCAST;
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
QED_ACCEPT_MCAST_MATCHED |
QED_ACCEPT_BCAST;
@@ -2072,9 +2055,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev,
struct qed_filter_ucast ucast;
if (!params->vlan_valid && !params->mac_valid) {
- DP_NOTICE(
- cdev,
- "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
return -EINVAL;
}
@@ -2135,8 +2117,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
for (i = 0; i < mcast.num_mc_addrs; i++)
ether_addr_copy(mcast.mac[i], params->mac[i]);
- return qed_filter_mcast_cmd(cdev, &mcast,
- QED_SPQ_MODE_CB, NULL);
+ return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter(struct qed_dev *cdev,
@@ -2153,15 +2134,13 @@ static int qed_configure_filter(struct qed_dev *cdev,
accept_flags = params->filter.accept_flags;
return qed_configure_filter_rx_mode(cdev, accept_flags);
default:
- DP_NOTICE(cdev, "Unknown filter type %d\n",
- (int)params->type);
+ DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
return -EINVAL;
}
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
- u8 rss_id,
- struct eth_slow_path_rx_cqe *cqe)
+ u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
cqe);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 002114543451..e495d62fcc03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -102,6 +102,8 @@ struct qed_sp_vport_start_params {
u16 opaque_fid;
u8 vport_id;
u16 mtu;
+ bool check_mac;
+ bool check_ethtype;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
@@ -213,6 +215,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
@@ -223,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644
index 000000000000..a6db10717d5c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -0,0 +1,1792 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/ipv6.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
+#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
+
+#define QED_LL2_TX_SIZE (256)
+#define QED_LL2_RX_SIZE (4096)
+
+struct qed_cb_ll2_info {
+ int rx_cnt;
+ u32 rx_size;
+ u8 handle;
+ bool frags_mapped;
+
+ /* Lock protecting LL2 buffer lists in sleepless context */
+ spinlock_t lock;
+ struct list_head list;
+
+ const struct qed_ll2_cb_ops *cbs;
+ void *cb_cookie;
+};
+
+struct qed_ll2_buffer {
+ struct list_head list;
+ void *data;
+ dma_addr_t phys_addr;
+};
+
+static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct sk_buff *skb = cookie;
+
+ /* All we need to do is release the mapping */
+ dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
+ cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
+ b_last_fragment);
+
+ if (cdev->ll2->frags_mapped)
+ /* Case where mapped frags were received, need to
+ * free skb with nr_frags marked as 0
+ */
+ skb_shinfo(skb)->nr_frags = 0;
+
+ dev_kfree_skb_any(skb);
+}
+
+static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ u8 **data, dma_addr_t *phys_addr)
+{
+ *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ if (!(*data)) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ return -ENOMEM;
+ }
+
+ *phys_addr = dma_map_single(&cdev->pdev->dev,
+ ((*data) + NET_SKB_PAD),
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
+ DP_INFO(cdev, "Failed to map LL2 buffer data\n");
+ kfree((*data));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
+ struct qed_ll2_buffer *buffer)
+{
+ spin_lock_bh(&cdev->ll2->lock);
+
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+
+ cdev->ll2->rx_cnt--;
+ if (!cdev->ll2->rx_cnt)
+ DP_INFO(cdev, "All LL2 entries were removed\n");
+
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ return 0;
+}
+
+static void qed_ll2_kill_buffers(struct qed_dev *cdev)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
+{
+ u16 packet_length = le16_to_cpu(p_cqe->packet_length);
+ struct qed_ll2_buffer *buffer = p_pkt->cookie;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 vlan = le16_to_cpu(p_cqe->vlan);
+ u32 opaque_data_0, opaque_data_1;
+ u8 pad = p_cqe->placement_offset;
+ dma_addr_t new_phys_addr;
+ struct sk_buff *skb;
+ bool reuse = false;
+ int rc = -EINVAL;
+ u8 *new_data;
+
+ opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
+ opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
+ "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
+ (u64)p_pkt->rx_buf_addr, pad, packet_length,
+ le16_to_cpu(p_cqe->parse_flags.flags), vlan,
+ opaque_data_0, opaque_data_1);
+
+ if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buffer->data, packet_length, false);
+ }
+
+ /* Determine if data is valid */
+ if (packet_length < ETH_HLEN)
+ reuse = true;
+
+ /* Allocate a replacement buffer; reuse the current one upon failure */
+ if (!reuse)
+ rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
+ &new_phys_addr);
+
+ /* If we need to reuse, or there's no replacement buffer, repost this one */
+ if (rc)
+ goto out_post;
+
+ skb = build_skb(buffer->data, 0);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto out_post;
+ }
+
+ pad += NET_SKB_PAD;
+ skb_reserve(skb, pad);
+ skb_put(skb, packet_length);
+ skb_checksum_none_assert(skb);
+
+ /* Get partial Ethernet information instead of eth_type_trans(),
+ * since we don't have an associated net_device.
+ */
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ /* Pass SKB onward */
+ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
+ if (vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+ opaque_data_0, opaque_data_1);
+ }
+
+ /* Update Buffer information and update FW producer */
+ buffer->data = new_data;
+ buffer->phys_addr = new_phys_addr;
+
+out_post:
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+
+ if (rc)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ bool b_lock,
+ bool b_only_active)
+{
+ struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
+
+ if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
+ return NULL;
+
+ if (!p_hwfn->p_ll2_info)
+ return NULL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ if (b_only_active) {
+ if (b_lock)
+ mutex_lock(&p_ll2_conn->mutex);
+ if (p_ll2_conn->b_active)
+ p_ret = p_ll2_conn;
+ if (b_lock)
+ mutex_unlock(&p_ll2_conn->mutex);
+ } else {
+ p_ret = p_ll2_conn;
+ }
+
+ return p_ret;
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
+}
+
+static struct qed_ll2_info *
+qed_ll2_handle_sanity_inactive(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
+}
+
+static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ bool b_last_packet = false, b_last_frag = false;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_queue *p_tx;
+ dma_addr_t tx_frag;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_tx = &p_ll2_conn->tx_queue;
+
+ while (!list_empty(&p_tx->active_descq)) {
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ b_last_packet = list_empty(&p_tx->active_descq);
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ p_tx->cur_completing_packet = *p_pkt;
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_release_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ }
+}
+
+static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
+ struct qed_ll2_tx_packet *p_pkt;
+ bool b_last_frag = false;
+ unsigned long flags;
+ dma_addr_t tx_frag;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->b_completing_packet) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ goto out;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ goto out;
+
+ p_tx->b_completing_packet = true;
+ p_tx->cur_completing_packet = *p_pkt;
+ num_bds_in_packet = p_pkt->bd_used;
+ list_del(&p_pkt->list_entry);
+
+ if (num_bds < num_bds_in_packet) {
+ DP_NOTICE(p_hwfn,
+ "Rest of BDs does not cover whole packet\n");
+ goto out;
+ }
+
+ num_bds -= num_bds_in_packet;
+ p_tx->bds_idx += num_bds_in_packet;
+ while (num_bds_in_packet--)
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ spin_lock_irqsave(&p_tx->lock, flags);
+ }
+
+ p_tx->b_completing_packet = false;
+ rc = 0;
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+static int
+qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags, bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ u16 packet_length, parse_flags, vlan;
+ u32 src_mac_addrhi;
+ u16 src_mac_addrlo;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "GSI Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+
+ list_del(&p_pkt->list_entry);
+ parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_gsi_packet(p_hwfn,
+ p_ll2_info->my_id,
+ p_pkt->cookie,
+ p_pkt->rx_buf_addr,
+ packet_length,
+ p_cqe->rx_cqe_gsi.data_length_error,
+ parse_flags,
+ vlan,
+ src_mac_addrhi,
+ src_mac_addrlo, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags,
+ bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "LL2 Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
+ p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = cookie;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ while (cq_new_idx != cq_old_idx) {
+ bool b_last_cqe = (cq_new_idx == cq_old_idx);
+
+ cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
+ cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
+
+ switch (cqe->rx_cqe_sp.type) {
+ case CORE_RX_CQE_TYPE_SLOW_PATH:
+ DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
+ rc = -EINVAL;
+ break;
+ case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+ rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ case CORE_RX_CQE_TYPE_REGULAR:
+ rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ default:
+ rc = -EIO;
+ }
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_rx_queue *p_rx;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_rx = &p_ll2_conn->rx_queue;
+
+ while (!list_empty(&p_rx->active_descq)) {
+ dma_addr_t rx_buf_addr;
+ void *cookie;
+ bool b_last;
+
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ rx_buf_addr = p_pkt->rx_buf_addr;
+ cookie = p_pkt->cookie;
+
+ b_last = list_empty(&p_rx->active_descq);
+ }
+}
+
+static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ u8 action_on_error)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 cqe_pbl_size;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_rx->rx_sb_index;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base,
+ p_rx->rxq_chain.p_phys_addr);
+ cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
+ qed_chain_get_pbl_phys(&p_rx->rcq_chain));
+
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+ p_ramrod->main_func_queue = 1;
+
+ if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
+ p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+ p_ramrod->mf_si_bcast_accept_all = 1;
+ p_ramrod->mf_si_mcast_accept_all = 1;
+ } else {
+ p_ramrod->mf_si_bcast_accept_all = 0;
+ p_ramrod->mf_si_mcast_accept_all = 0;
+ }
+
+ p_ramrod->action_on_error.error_type = action_on_error;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct core_tx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params pq_params;
+ u16 pq_id = 0, pbl_size;
+ int rc = -EINVAL;
+
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_tx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_tx->tx_sb_index;
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ll2_conn->tx_stats_en = 1;
+ p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
+ p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
+
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
+ qed_chain_get_pbl_phys(&p_tx->txq_chain));
+ pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ switch (conn_type) {
+ case QED_LL2_TYPE_ISCSI:
+ case QED_LL2_TYPE_ISCSI_OOO:
+ p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ break;
+ case QED_LL2_TYPE_ROCE:
+ p_ramrod->conn_type = PROTOCOLID_ROCE;
+ break;
+ default:
+ p_ramrod->conn_type = PROTOCOLID_ETH;
+ DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
+ }
+
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct core_rx_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
+
+ p_ramrod->complete_event_flg = 1;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+{
+ struct qed_ll2_rx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!rx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_bd),
+ &p_ll2_info->rx_queue.rxq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
+ goto out;
+ }
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
+ goto out;
+ }
+ p_ll2_info->rx_queue.descq_array = p_descq;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_fast_path_cqe),
+ &p_ll2_info->rx_queue.rcq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, rx_num_desc);
+
+out:
+ return rc;
+}
+
+static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ u16 tx_num_desc)
+{
+ struct qed_ll2_tx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!tx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ tx_num_desc,
+ sizeof(struct core_tx_bd),
+ &p_ll2_info->tx_queue.txq_chain);
+ if (rc)
+ goto out;
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ p_ll2_info->tx_queue.descq_array = p_descq;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, tx_num_desc);
+
+out:
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
+ tx_num_desc);
+ return rc;
+}
+
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle)
+{
+ qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
+ struct qed_ll2_info *p_ll2_info = NULL;
+ int rc;
+ u8 i;
+
+ if (!p_connection_handle || !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ /* Find a free connection to be used */
+ for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+ mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
+ if (p_hwfn->p_ll2_info[i].b_active) {
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ continue;
+ }
+
+ p_hwfn->p_ll2_info[i].b_active = true;
+ p_ll2_info = &p_hwfn->p_ll2_info[i];
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ break;
+ }
+ if (!p_ll2_info)
+ return -EBUSY;
+
+ p_ll2_info->conn_type = p_params->conn_type;
+ p_ll2_info->mtu = p_params->mtu;
+ p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
+ p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
+ p_ll2_info->tx_tc = p_params->tx_tc;
+ p_ll2_info->tx_dest = p_params->tx_dest;
+ p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
+ p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
+ p_ll2_info->gsi_enable = p_params->gsi_enable;
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ /* Register callbacks for the Rx/Tx queues */
+ comp_rx_cb = qed_ll2_rxq_completion;
+ comp_tx_cb = qed_ll2_txq_completion;
+
+ if (rx_num_desc) {
+ qed_int_register_cb(p_hwfn, comp_rx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->rx_queue.rx_sb_index,
+ &p_ll2_info->rx_queue.p_fw_cons);
+ p_ll2_info->rx_queue.b_cb_registred = true;
+ }
+
+ if (tx_num_desc) {
+ qed_int_register_cb(p_hwfn,
+ comp_tx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->tx_queue.tx_sb_index,
+ &p_ll2_info->tx_queue.p_fw_cons);
+ p_ll2_info->tx_queue.b_cb_registred = true;
+ }
+
+ *p_connection_handle = i;
+ return rc;
+
+q_allocate_fail:
+ qed_ll2_release_connection(p_hwfn, i);
+ return -ENOMEM;
+}
+
+static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ u8 action_on_error = 0;
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
+
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
+ p_ll2_conn->ai_err_packet_too_big);
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+
+ return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+}
+
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ struct qed_ll2_tx_queue *p_tx;
+ int rc = -EINVAL;
+ u32 i, capacity;
+ u8 qid;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+ p_tx = &p_ll2_conn->tx_queue;
+
+ qed_chain_reset(&p_rx->rxq_chain);
+ qed_chain_reset(&p_rx->rcq_chain);
+ INIT_LIST_HEAD(&p_rx->active_descq);
+ INIT_LIST_HEAD(&p_rx->free_descq);
+ INIT_LIST_HEAD(&p_rx->posting_descq);
+ spin_lock_init(&p_rx->lock);
+ capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_rx->descq_array[i].list_entry,
+ &p_rx->free_descq);
+ *p_rx->p_fw_cons = 0;
+
+ qed_chain_reset(&p_tx->txq_chain);
+ INIT_LIST_HEAD(&p_tx->active_descq);
+ INIT_LIST_HEAD(&p_tx->free_descq);
+ INIT_LIST_HEAD(&p_tx->sending_descq);
+ spin_lock_init(&p_tx->lock);
+ capacity = qed_chain_get_capacity(&p_tx->txq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_tx->descq_array[i].list_entry,
+ &p_tx->free_descq);
+ p_tx->cur_completing_bd_idx = 0;
+ p_tx->bds_idx = 0;
+ p_tx->b_completing_packet = false;
+ p_tx->cur_send_packet = NULL;
+ p_tx->cur_send_frag_num = 0;
+ p_tx->cur_completing_frag_num = 0;
+ *p_tx->p_fw_cons = 0;
+
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ p_ll2_conn->queue_id = qid;
+ p_ll2_conn->tx_stats_id = qid;
+ p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_ll2_conn->cid,
+ DQ_DEMS_LEGACY);
+
+ rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+
+ return rc;
+}
+
+static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_rx_queue *p_rx,
+ struct qed_ll2_rx_packet *p_curp)
+{
+ struct qed_ll2_rx_packet *p_posting_packet = NULL;
+ struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ bool b_notify_fw = false;
+ u16 bd_prod, cq_prod;
+
+ /* This handles the flushing of already posted buffers */
+ while (!list_empty(&p_rx->posting_descq)) {
+ p_posting_packet = list_first_entry(&p_rx->posting_descq,
+ struct qed_ll2_rx_packet,
+ list_entry);
+ list_del(&p_posting_packet->list_entry);
+ list_add_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ /* This handles the supplied packet [if there is one] */
+ if (p_curp) {
+ list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ if (!b_notify_fw)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
+ cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+}
+
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw)
+{
+ struct core_rx_bd_with_buff_len *p_curb = NULL;
+ struct qed_ll2_rx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags;
+ void *p_data;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ if (!list_empty(&p_rx->free_descq))
+ p_curp = list_first_entry(&p_rx->free_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (p_curp) {
+ if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
+ qed_chain_get_elem_left(&p_rx->rcq_chain)) {
+ p_data = qed_chain_produce(&p_rx->rxq_chain);
+ p_curb = (struct core_rx_bd_with_buff_len *)p_data;
+ qed_chain_produce(&p_rx->rcq_chain);
+ }
+ }
+
+ /* If we're lacking entries, let's try to flush buffers to FW */
+ if (!p_curp || !p_curb) {
+ rc = -EBUSY;
+ p_curp = NULL;
+ goto out_notify;
+ }
+
+ /* We have an Rx packet we can fill */
+ DMA_REGPAIR_LE(p_curb->addr, addr);
+ p_curb->buff_length = cpu_to_le16(buf_len);
+ p_curp->rx_buf_addr = addr;
+ p_curp->cookie = cookie;
+ p_curp->rxq_bd = p_curb;
+ p_curp->buf_length = buf_len;
+ list_del(&p_curp->list_entry);
+
+ /* Check if we only want to enqueue this packet without informing FW */
+ if (!notify_fw) {
+ list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
+ goto out;
+ }
+
+out_notify:
+ qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
+out:
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_tx_queue *p_tx,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *p_cookie,
+ u8 notify_fw)
+{
+ list_del(&p_curp->list_entry);
+ p_curp->cookie = p_cookie;
+ p_curp->bd_used = num_of_bds;
+ p_curp->notify_fw = notify_fw;
+ p_tx->cur_send_packet = p_curp;
+ p_tx->cur_send_frag_num = 0;
+
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+ p_tx->cur_send_frag_num++;
+}
+
+static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type type,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
+{
+ struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
+ u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
+ struct core_tx_bd *start_bd = NULL;
+ u16 frag_idx;
+
+ start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
+ cpu_to_le16(l4_hdr_offset_w));
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+ start_bd->bd_flags.as_bitfield = bd_flags;
+ start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
+ CORE_TX_BD_FLAGS_START_BD_SHIFT;
+ SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+ DMA_REGPAIR_LE(start_bd->addr, first_frag);
+ start_bd->nbytes = cpu_to_le16(first_frag_len);
+
+ SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
+ type);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
+ p_ll2->queue_id,
+ p_ll2->cid,
+ p_ll2->conn_type,
+ prod_idx,
+ first_frag_len,
+ num_of_bds,
+ le32_to_cpu(start_bd->addr.hi),
+ le32_to_cpu(start_bd->addr.lo));
+
+ if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+ return;
+
+ /* Need to provide the packet with additional BDs for frags */
+ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
+ frag_idx < num_of_bds; frag_idx++) {
+ struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
+
+ *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bitfield1 = 0;
+ (*p_bd)->bitfield0 = 0;
+ p_curp->bds_set[frag_idx].tx_frag = 0;
+ p_curp->bds_set[frag_idx].frag_len = 0;
+ }
+}
+
+/* This should be called while the Txq spinlock is being held */
+static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct core_db_data db_msg = { 0, 0, 0 };
+ u16 bd_prod;
+
+ /* If there are missing BDs, don't do anything now */
+ if (p_ll2_conn->tx_queue.cur_send_frag_num !=
+ p_ll2_conn->tx_queue.cur_send_packet->bd_used)
+ return;
+
+ /* Push the current packet to the list and clean after it */
+ list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
+ &p_ll2_conn->tx_queue.sending_descq);
+ p_ll2_conn->tx_queue.cur_send_packet = NULL;
+ p_ll2_conn->tx_queue.cur_send_frag_num = 0;
+
+ /* Notify FW of packet only if requested to */
+ if (!b_notify)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
+
+ while (!list_empty(&p_tx->sending_descq)) {
+ p_pkt = list_first_entry(&p_tx->sending_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ }
+
+ SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+ db_msg.spq_prod = cpu_to_le16(bd_prod);
+
+ /* Make sure the BDs data is updated before ringing the doorbell */
+ wmb();
+
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
+ p_ll2_conn->queue_id,
+ p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+}
+
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw)
+{
+ struct qed_ll2_tx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ struct qed_ll2_tx_queue *p_tx;
+ struct qed_chain *p_tx_chain;
+ unsigned long flags;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_tx = &p_ll2_conn->tx_queue;
+ p_tx_chain = &p_tx->txq_chain;
+
+ if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ return -EIO;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->cur_send_packet) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ /* Get entry, but only if we have tx elements for it */
+ if (!list_empty(&p_tx->free_descq))
+ p_curp = list_first_entry(&p_tx->free_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+ p_curp = NULL;
+
+ if (!p_curp) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (qed_roce_flavor == QED_LL2_ROCE) {
+ roce_flavor = CORE_ROCE;
+ } else if (qed_roce_flavor == QED_LL2_RROCE) {
+ roce_flavor = CORE_RROCE;
+ } else {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Prepare packet and BD, and perhaps send a doorbell to FW */
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
+ num_of_bds, first_frag,
+ first_frag_len, cookie, notify_fw);
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
+ num_of_bds, CORE_TX_DEST_NW,
+ vlan, bd_flags, l4_hdr_offset_w,
+ roce_flavor,
+ first_frag, first_frag_len);
+
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes)
+{
+ struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ u16 cur_send_frag_num = 0;
+ struct core_tx_bd *p_bd;
+ unsigned long flags;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ if (!p_ll2_conn->tx_queue.cur_send_packet)
+ return -EINVAL;
+
+ p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
+ cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
+
+ if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ return -EINVAL;
+
+ /* Fill the BD information, and possibly notify FW */
+ p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
+ DMA_REGPAIR_LE(p_bd->addr, addr);
+ p_bd->nbytes = cpu_to_le16(nbytes);
+ p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
+ p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
+
+ p_ll2_conn->tx_queue.cur_send_frag_num++;
+
+ spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+ spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
+
+ return 0;
+}
+
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ int rc = -EINVAL;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ /* Stop Tx & Rx of connection, if needed */
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_txq_flush(p_hwfn, connection_handle);
+ }
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ }
+
+ return rc;
+}
+
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ }
+
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ }
+
+ kfree(p_ll2_conn->tx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+ kfree(p_ll2_conn->rx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
+
+ qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+
+ mutex_lock(&p_ll2_conn->mutex);
+ p_ll2_conn->b_active = false;
+ mutex_unlock(&p_ll2_conn->mutex);
+}
+
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ll2_info *p_ll2_connections;
+ u8 i;
+
+ /* Allocate LL2's set struct */
+ p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
+ sizeof(struct qed_ll2_info), GFP_KERNEL);
+ if (!p_ll2_connections) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
+ return NULL;
+ }
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ p_ll2_connections[i].my_id = i;
+
+ return p_ll2_connections;
+}
+
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ int i;
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ mutex_init(&p_ll2_connections[i].mutex);
+}
+
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ kfree(p_ll2_connections);
+}
+
+static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_tstorm_per_queue_stat tstats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->packet_too_big_discard =
+ HILO_64_REGPAIR(tstats.packet_too_big_discard);
+ p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+}
+
+static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_ustorm_per_queue_stat ustats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_pstorm_per_queue_stat pstats;
+ u8 stats_id = p_ll2_conn->tx_stats_id;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+}
+
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+ if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
+ !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ if (p_ll2_conn->tx_stats_en)
+ _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+}
+
+static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie)
+{
+ cdev->ll2->cbs = ops;
+ cdev->ll2->cb_cookie = cookie;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ struct qed_ll2_info ll2_info;
+ struct qed_ll2_buffer *buffer;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ptt *p_ptt;
+ int rc, i;
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
+ cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+ cdev->ll2->frags_mapped = params->frags_mapped;
+
+ /* Allocate memory for LL2 */
+ DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
+ cdev->ll2->rx_size);
+ for (i = 0; i < QED_LL2_RX_SIZE; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
+ if (rc) {
+ kfree(buffer);
+ goto fail;
+ }
+
+ list_add_tail(&buffer->list, &cdev->ll2->list);
+ }
+
+ switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ conn_type = QED_LL2_TYPE_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ conn_type = QED_LL2_TYPE_ROCE;
+ break;
+ default:
+ conn_type = QED_LL2_TYPE_TEST;
+ }
+
+ /* Prepare the temporary ll2 information */
+ memset(&ll2_info, 0, sizeof(ll2_info));
+ ll2_info.conn_type = conn_type;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = 0;
+ ll2_info.tx_dest = CORE_TX_DEST_NW;
+ ll2_info.gsi_enable = 1;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
+ QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+ &cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 connection\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establish LL2 connection\n");
+ goto release_fail;
+ }
+
+ /* Post all Rx buffers to FW */
+ spin_lock_bh(&cdev->ll2->lock);
+ list_for_each_entry(buffer, &cdev->ll2->list, list) {
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+ if (rc) {
+ DP_INFO(cdev,
+ "Failed to post an Rx buffer; Deleting it\n");
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+ kfree(buffer);
+ } else {
+ cdev->ll2->rx_cnt++;
+ }
+ }
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ if (!cdev->ll2->rx_cnt) {
+ DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
+ goto release_terminate;
+ }
+
+ if (!is_valid_ether_addr(params->ll2_mac_address)) {
+ DP_INFO(cdev, "Invalid Ethernet address\n");
+ goto release_terminate;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto release_terminate;
+ }
+
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ params->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ if (rc) {
+ DP_ERR(cdev, "Failed to allocate LLH filter\n");
+ goto release_terminate_all;
+ }
+
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
+
+ return 0;
+
+release_terminate_all:
+
+release_terminate:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+release_fail:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+fail:
+ qed_ll2_kill_buffers(cdev);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+ return -EINVAL;
+}
+
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto fail;
+ }
+
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ cdev->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc;
+fail:
+ return -EINVAL;
+}
+
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+{
+ const skb_frag_t *frag;
+ int rc = -EINVAL, i;
+ dma_addr_t mapping;
+ u16 vlan = 0;
+ u8 flags = 0;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+ DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
+ return -EINVAL;
+ }
+
+ if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+ 1 + skb_shinfo(skb)->nr_frags);
+ return -EINVAL;
+ }
+
+ mapping = dma_map_single(&cdev->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev, "SKB mapping failed\n");
+ return -EINVAL;
+ }
+
+ /* Request HW to calculate IP csum */
+ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
+ ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ if (skb_vlan_tag_present(skb)) {
+ vlan = skb_vlan_tag_get(skb);
+ flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ }
+
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ 1 + skb_shinfo(skb)->nr_frags,
+ vlan, flags, 0, 0 /* RoCE FLAVOR */,
+ mapping, skb->len, skb, 1);
+ if (rc)
+ goto err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ if (!cdev->ll2->frags_mapped) {
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev,
+ mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ goto err;
+ }
+ } else {
+ mapping = page_to_phys(skb_frag_page(frag)) |
+ frag->page_offset;
+ }
+
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ mapping,
+ skb_frag_size(frag));
+
+ /* If this failed there isn't much we can do: a partial packet has
+ * been posted and we can't free the memory, so we'll have to wait
+ * for the completion.
+ */
+ if (rc)
+ goto err2;
+ }
+
+ return 0;
+
+err:
+ dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
+
+err2:
+ return rc;
+}
+
+static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ if (!cdev->ll2)
+ return -EINVAL;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+}
+
+const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = &qed_ll2_start,
+ .stop = &qed_ll2_stop,
+ .start_xmit = &qed_ll2_start_xmit,
+ .register_cb_ops = &qed_ll2_register_cb_ops,
+ .get_stats = &qed_ll2_stats,
+};
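+
+/* These ops are handed to upper-layer modules through the qed core; a
+ * hedged sketch of how a consumer might drive them (`ops', `my_cbs',
+ * `my_params' and `my_cookie' are hypothetical names):
+ *
+ *	ops->register_cb_ops(cdev, &my_cbs, my_cookie);
+ *	if (!ops->start(cdev, &my_params))
+ *		ops->start_xmit(cdev, skb);
+ */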
+
+int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
+ return cdev->ll2 ? 0 : -ENOMEM;
+}
+
+void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+ kfree(cdev->ll2);
+ cdev->ll2 = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644
index 000000000000..80a5dc2d652d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -0,0 +1,316 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_H
+#define _QED_LL2_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+
+enum qed_ll2_roce_flavor_type {
+ QED_LL2_ROCE,
+ QED_LL2_RROCE,
+ MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_conn_type {
+ QED_LL2_TYPE_RESERVED,
+ QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TEST,
+ QED_LL2_TYPE_ISCSI_OOO,
+ QED_LL2_TYPE_RESERVED2,
+ QED_LL2_TYPE_ROCE,
+ QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_RX_CONN_TYPE
+};
+
+struct qed_ll2_rx_packet {
+ struct list_head list_entry;
+ struct core_rx_bd_with_buff_len *rxq_bd;
+ dma_addr_t rx_buf_addr;
+ u16 buf_length;
+ void *cookie;
+ u8 placement_offset;
+ u16 parse_flags;
+ u16 packet_length;
+ u16 vlan;
+ u32 opaque_data[2];
+};
+
+struct qed_ll2_tx_packet {
+ struct list_head list_entry;
+ u16 bd_used;
+ u16 vlan;
+ u16 l4_hdr_offset_w;
+ u8 bd_flags;
+ bool notify_fw;
+ void *cookie;
+
+ struct {
+ struct core_tx_bd *txq_bd;
+ dma_addr_t tx_frag;
+ u16 frag_len;
+ } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+};
+
+struct qed_ll2_rx_queue {
+ /* Lock protecting the Rx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain rxq_chain;
+ struct qed_chain rcq_chain;
+ u8 rx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head posting_descq;
+ struct qed_ll2_rx_packet *descq_array;
+ void __iomem *set_prod_addr;
+};
+
+struct qed_ll2_tx_queue {
+ /* Lock protecting the Tx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain txq_chain;
+ u8 tx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head sending_descq;
+ struct qed_ll2_tx_packet *descq_array;
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
+ u16 cur_completing_bd_idx;
+ void __iomem *doorbell_addr;
+ u16 bds_idx;
+ u16 cur_send_frag_num;
+ u16 cur_completing_frag_num;
+ bool b_completing_packet;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ enum qed_ll2_conn_type conn_type;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
+ u16 mtu;
+ u8 rx_drop_ttl0_flg;
+ u8 rx_vlan_removal_en;
+ u8 tx_tc;
+ enum core_tx_dest tx_dest;
+ enum core_error_handle ai_err_packet_too_big;
+ enum core_error_handle ai_err_no_buf;
+ u8 tx_stats_en;
+ struct qed_ll2_rx_queue rx_queue;
+ struct qed_ll2_tx_queue tx_queue;
+ u8 gsi_enable;
+};
+
+/**
+ * @brief qed_ll2_acquire_connection - allocate resources and
+ *        start the Rx & Tx (if relevant) queue pair. Provides the
+ *        connection handle as an output parameter.
+ *
+ * @param p_hwfn
+ * @param p_params Contain various configuration properties
+ * @param rx_num_desc
+ * @param tx_num_desc
+ *
+ * @param p_connection_handle Output container for LL2 connection's handle
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle);
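+
+/* A minimal usage sketch (hypothetical caller; p_hwfn assumed already
+ * initialized), showing the intended acquire -> establish ordering as
+ * also done by qed_ll2_start(); error handling abbreviated:
+ *
+ *	struct qed_ll2_info params;
+ *	u8 handle;
+ *	int rc;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.conn_type = QED_LL2_TYPE_TEST;
+ *	params.mtu = 1500;
+ *	rc = qed_ll2_acquire_connection(p_hwfn, &params, 32, 32, &handle);
+ *	if (!rc)
+ *		rc = qed_ll2_establish_connection(p_hwfn, handle);
+ */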
+
+/**
+ * @brief qed_ll2_establish_connection - start previously
+ * allocated LL2 queues pair
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *                          qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_post_rx_buffer - submit a buffer to the LL2 Rx queue.
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *                          qed_ll2_acquire_connection
+ * @param addr physical address of the Rx buffer to submit
+ * @param buf_len length of the submitted buffer
+ * @param cookie
+ * @param notify_fw whether to produce the corresponding Rx BD immediately
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw);
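+
+/* Illustrative sketch: posting an already DMA-mapped buffer, with `phys'
+ * and `buf' hypothetical. Passing notify_fw == 0 only enqueues the buffer;
+ * a later call with notify_fw == 1 flushes the whole pending batch to the
+ * FW in a single producer update.
+ *
+ *	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, phys, 0, buf, 1);
+ *	if (rc)
+ *		goto unmap_and_free;	hypothetical cleanup label
+ */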
+
+/**
+ * @brief qed_ll2_prepare_tx_packet - request a start Tx BD
+ *        to prepare a Tx packet for submission to the FW.
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *                          qed_ll2_acquire_connection
+ * @param num_of_bds number of requested BDs; equals the number of
+ *                   fragments in the Tx packet
+ * @param vlan VLAN to insert to packet (if insertion set)
+ * @param bd_flags
+ * @param l4_hdr_offset_w L4 Header Offset from start of packet
+ * (in words). This is needed if both l4_csum
+ * and ipv6_ext are set
+ * @param first_frag
+ * @param first_frag_len
+ * @param cookie
+ *
+ * @param notify_fw
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw);
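+
+/* A sketch of a two-fragment transmit, assuming existing DMA mappings
+ * frag0/frag1 with lengths len0/len1 (hypothetical names). The first
+ * fragment rides on the start BD; every additional fragment is supplied
+ * separately:
+ *
+ *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 2, 0, 0, 0,
+ *				       QED_LL2_ROCE, frag0, len0, skb, 1);
+ *	if (!rc)
+ *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
+ *						       frag1, len1);
+ */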
+
+/**
+ * @brief qed_ll2_release_connection - releases resources
+ * allocated for LL2 connection
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *                          qed_ll2_acquire_connection
+ */
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_set_fragment_of_tx_packet - provides a fragment to
+ *        fill a Tx BD of the BDs requested by
+ *        qed_ll2_prepare_tx_packet
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ *                          qed_ll2_acquire_connection
+ * @param addr
+ * @param nbytes
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes);
+
+/**
+ * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ *                          qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
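+
+/* Teardown mirrors bring-up: terminate stops the queues and flushes any
+ * outstanding descriptors, release then frees the resources. A sketch,
+ * matching the order used by qed_ll2_stop():
+ *
+ *	qed_ll2_terminate_connection(p_hwfn, handle);
+ *	qed_ll2_release_connection(p_hwfn, handle);
+ */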
+
+/**
+ * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *                          qed_ll2_acquire_connection
+ * @param p_stats
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats);
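+
+/* Illustrative only: the call zeroes *p_stats before filling it, so a
+ * stack-allocated struct needs no memset.
+ *
+ *	struct qed_ll2_stats stats;
+ *
+ *	if (!qed_ll2_get_stats(p_hwfn, handle, &stats))
+ *		pr_info("rx ucast pkts: %llu\n", stats.rcv_ucast_pkts);
+ */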
+
+/**
+ * @brief qed_ll2_alloc - Allocates LL2 connections set
+ *
+ * @param p_hwfn
+ *
+ * @return pointer to the allocated qed_ll2_info or NULL
+ */
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ll2_setup - Inits LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+
+/**
+ * @brief qed_ll2_free - Releases LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
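+
+/* The three calls above form the connections-set lifecycle; the expected
+ * ordering (sketch, error paths omitted):
+ *
+ *	p_hwfn->p_ll2_info = qed_ll2_alloc(p_hwfn);
+ *	if (p_hwfn->p_ll2_info)
+ *		qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+ *	...
+ *	qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+ */
+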
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c7dc34bfdd0a..4ee3151e80c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,15 +22,22 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#define QED_ROCE_QPS (8192)
+#define QED_ROCE_DPIS (8)
+#endif
+
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -51,8 +58,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
- pr_notice("qed_init called\n");
-
pr_info("%s", version);
return 0;
@@ -106,8 +111,7 @@ static void qed_free_pci(struct qed_dev *cdev)
/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
*/
-static int qed_init_pci(struct qed_dev *cdev,
- struct pci_dev *pdev)
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
u8 rev_id;
int rc;
@@ -207,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->rdma_supported =
- (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
+ dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
+ QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -263,8 +267,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
}
/* Sets the requested power state */
-static int qed_set_power_state(struct qed_dev *cdev,
- pci_power_t state)
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
if (!cdev)
return -ENODEV;
@@ -366,8 +369,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
DP_NOTICE(cdev,
"Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
- rc = pci_enable_msix_exact(cdev->pdev,
- int_params->msix_table, cnt);
+ rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+ cnt);
if (!rc)
rc = cnt;
}
@@ -439,6 +442,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
}
out:
+ if (!rc)
+ DP_INFO(cdev, "Using %s interrupts\n",
+ int_params->out.int_mode == QED_INT_MODE_INTA ?
+ "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+ "MSI" : "MSIX");
cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
return rc;
@@ -514,19 +522,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
struct qed_dev *cdev = hwfn->cdev;
+ u32 int_mode;
int rc = 0;
u8 id;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX) {
id = hwfn->my_id;
snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
- if (!rc)
- DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
- "Requested slowpath MSI-X\n");
} else {
unsigned long flags = 0;
@@ -541,6 +548,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
flags, cdev->name, cdev);
}
+ if (rc)
+ DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+ else
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath %s\n",
+ (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
return rc;
}
@@ -581,6 +595,8 @@ static int qed_nic_stop(struct qed_dev *cdev)
}
}
+ qed_dbg_pf_exit(cdev);
+
return rc;
}
@@ -599,7 +615,16 @@ static int qed_nic_reset(struct qed_dev *cdev)
static int qed_nic_setup(struct qed_dev *cdev)
{
- int rc;
+ int rc, i;
+
+ /* Determine if interface is going to require LL2 */
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->using_ll2 = true;
+ }
+ }
rc = qed_resc_alloc(cdev);
if (rc)
@@ -657,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ int num_l2_queues;
+#endif
int rc;
int i;
@@ -687,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ num_l2_queues = 0;
+ for_each_hwfn(cdev, i)
+ num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA,
+ "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
+ cdev->int_params.fp_msix_cnt, num_l2_queues);
+
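+ /* Example split (illustrative numbers): with 64 fastpath MSI-X
+ * vectors, 48 L2 queues and 2 hwfns, (64 - 48) / 2 = 8 vectors are
+ * reserved per hwfn for RDMA, starting right after the L2 range.
+ */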
+ if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
+ cdev->int_params.rdma_msix_cnt =
+ (cdev->int_params.fp_msix_cnt - num_l2_queues)
+ / cdev->num_hwfns;
+ cdev->int_params.rdma_msix_base =
+ cdev->int_params.fp_msix_base + num_l2_queues;
+ cdev->int_params.fp_msix_cnt = num_l2_queues;
+ } else {
+ cdev->int_params.rdma_msix_cnt = 0;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
+ cdev->int_params.rdma_msix_cnt,
+ cdev->int_params.rdma_msix_base);
+#endif
+
return 0;
}
@@ -790,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* Divide the MRs by 3 to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -834,13 +894,13 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (IS_PF(cdev)) {
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ if (rc)
goto err2;
- }
 /* First Dword used to differentiate between various sources */
data = cdev->firmware->data + sizeof(u32);
+
+ qed_dbg_pf_init(cdev);
}
memset(&tunn_info, 0, sizeof(tunn_info));
@@ -864,6 +924,12 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ /* Allocate LL2 interface if needed */
+ if (QED_LEADING_HWFN(cdev)->using_ll2) {
+ rc = qed_ll2_alloc_if(cdev);
+ if (rc)
+ goto err3;
+ }
if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
@@ -884,6 +950,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return 0;
+err3:
+ qed_hw_stop(cdev);
err2:
qed_hw_timers_stop_all(cdev);
if (IS_PF(cdev))
@@ -906,6 +974,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_ll2_dealloc_if(cdev);
+
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
@@ -974,8 +1044,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
@@ -1025,20 +1094,23 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
link_params->speed.autoneg = params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
link_params->speed.advertised_speeds = 0;
- if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
- (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
+ (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
- if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
- if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
@@ -1168,50 +1240,56 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
 /* TODO - at the moment assume supported and advertised speeds are equal */
- if_link->supported_caps = SUPPORTED_FIBRE;
+ if_link->supported_caps = QED_LM_FIBRE_BIT;
if (params.speed.autoneg)
- if_link->supported_caps |= SUPPORTED_Autoneg;
+ if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
- if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
if (params.pause.autoneg || params.pause.forced_rx ||
params.pause.forced_tx)
- if_link->supported_caps |= SUPPORTED_Pause;
+ if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->advertised_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->advertised_caps |= 0;
+ if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->supported_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->supported_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->supported_caps |= 0;
+ if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.link_up)
if_link->speed = link.speed;
@@ -1231,33 +1309,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
/* Link partner capabilities */
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_HD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Half;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_FD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_10G)
- if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_40G)
- if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_50G)
- if_link->lp_caps |= 0;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_100G)
- if_link->lp_caps |= 0;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
+ if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.an_complete)
- if_link->lp_caps |= SUPPORTED_Autoneg;
+ if_link->lp_caps |= QED_LM_Autoneg_BIT;
if (link.partner_adv_pause)
- if_link->lp_caps |= SUPPORTED_Pause;
+ if_link->lp_caps |= QED_LM_Pause_BIT;
if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
- if_link->lp_caps |= SUPPORTED_Asym_Pause;
+ if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}
static void qed_get_current_link(struct qed_dev *cdev,
@@ -1385,9 +1459,32 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .dbg_all_data = &qed_dbg_all_data,
+ .dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
+
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats)
+{
+ struct qed_eth_stats eth_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ switch (type) {
+ case QED_MCP_LAN_STATS:
+ qed_get_vport_stats(cdev, &eth_stats);
+ stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ break;
+ default:
+ DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f776a77794c5..bdc9ba92f6d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
return true;
}
-void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PORT);
@@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
-void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
u32 tmp, i;
@@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
-static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
@@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info;
u32 size;
@@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
- p_info->mfw_mb_shadow =
- kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
- p_info->mfw_mb_length), GFP_KERNEL);
+ p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
@@ -177,7 +171,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
return 0;
err:
- DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
qed_mcp_free(p_hwfn);
return -ENOMEM;
}
@@ -189,8 +182,7 @@ err:
 * access is achieved by setting a blocking flag, which causes other
 * competing contexts' mailbox sends to fail.
*/
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
spin_lock_bh(&p_hwfn->mcp_info->lock);
@@ -221,15 +213,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
-int qed_mcp_reset(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
u8 delay = CHIP_MCP_RESP_ITER_US;
@@ -326,7 +316,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
} else {
/* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+ cmd, param);
*o_mcp_resp = 0;
rc = -EAGAIN;
}
@@ -342,7 +333,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -398,9 +389,36 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+{
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ *o_txn_size = *o_mcp_param;
+ memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+
+ return 0;
+}
+
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_load_code)
+ struct qed_ptt *p_ptt, u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
@@ -527,8 +545,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
"Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
transceiver_state,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data)));
+ offsetof(struct public_port, transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
ETH_TRANSCEIVER_STATE);
@@ -540,8 +557,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_reset)
+ struct qed_ptt *p_ptt, bool b_reset)
{
struct qed_mcp_link_state *p_link;
u8 max_bw, min_bw;
@@ -557,8 +573,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
"Received link update [0x%08x] from mfw [Addr 0x%x]\n",
status,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- link_status)));
+ offsetof(struct public_port, link_status)));
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
@@ -635,6 +650,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_20G : 0;
p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_40G : 0;
p_link->partner_adv_speed |=
@@ -722,6 +740,48 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return 0;
}
+static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum qed_mcp_protocol_type stats_type;
+ union qed_mcp_protocol_stats stats;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ u32 hsi_param;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = QED_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ stats_type = QED_MCP_FCOE_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
+ break;
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ stats_type = QED_MCP_ISCSI_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+ break;
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ stats_type = QED_MCP_RDMA_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ memcpy(&union_data, &stats, sizeof(stats));
+ mb_params.p_data_src = &union_data;
+ qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
struct public_func *p_shmem_info)
{
@@ -752,8 +812,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
+ struct public_func *p_data, int pfid)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
@@ -763,51 +822,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
memset(p_data, 0, sizeof(*p_data));
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
return size;
}
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf)
-{
- struct public_func shmem_info;
- int i;
-
- /* Find first Ethernet interface in port */
- for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
- i += p_hwfn->cdev->num_ports_in_engines) {
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID_BY_REL(p_hwfn, i));
-
- if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
- continue;
-
- if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
- FUNC_MF_CFG_PROTOCOL_ETHERNET) {
- *p_pf = (u8)i;
- return 0;
- }
- }
-
- DP_NOTICE(p_hwfn,
- "Failed to find on port an ethernet interface in MF_SI mode\n");
-
- return -EINVAL;
-}
-
-static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
struct public_func shmem_info;
u32 resp = 0, param = 0;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
qed_read_pf_bandwidth(p_hwfn, &shmem_info);
@@ -867,6 +895,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
case MFW_DRV_MSG_BW_UPDATE:
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
@@ -940,8 +974,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_get_media_type(struct qed_dev *cdev,
- u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
@@ -950,7 +983,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -1003,15 +1036,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_mcp_function_info *info;
struct public_func shmem_info;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &info->protocol)) {
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
@@ -1072,15 +1103,13 @@ struct qed_mcp_link_capabilities
return &p_hwfn->mcp_info->link_capabilities;
}
-int qed_mcp_drain(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 1000,
- &resp, &param);
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */
msleep(1020);
@@ -1089,8 +1118,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_flash_size)
+ struct qed_ptt *p_ptt, u32 *p_flash_size)
{
u32 flash_size;
@@ -1168,8 +1196,35 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_led_mode mode)
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 value, cpu_mode;
+
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+ return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
u32 resp = 0, param = 0, drv_mb_param;
int rc;
@@ -1195,6 +1250,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not acknowledge mask parity request. Old MFW?\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 7f319aa1b229..dff520ed069b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -60,9 +60,10 @@ struct qed_mcp_link_state {
#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
#define QED_LINK_PARTNER_SPEED_10G BIT(2)
#define QED_LINK_PARTNER_SPEED_20G BIT(3)
-#define QED_LINK_PARTNER_SPEED_40G BIT(4)
-#define QED_LINK_PARTNER_SPEED_50G BIT(5)
-#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+#define QED_LINK_PARTNER_SPEED_25G BIT(4)
+#define QED_LINK_PARTNER_SPEED_40G BIT(5)
+#define QED_LINK_PARTNER_SPEED_50G BIT(6)
+#define QED_LINK_PARTNER_SPEED_100G BIT(7)
u32 partner_adv_speed;
bool partner_tx_flow_ctrl_en;
@@ -105,6 +106,47 @@ struct qed_mcp_drv_version {
u8 name[MCP_DRV_VER_STR_SIZE - 4];
};
+struct qed_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+struct qed_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct qed_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct qed_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_byts;
+};
+
+enum qed_mcp_protocol_type {
+ QED_MCP_LAN_STATS,
+ QED_MCP_FCOE_STATS,
+ QED_MCP_ISCSI_STATS,
+ QED_MCP_RDMA_STATS
+};
+
+union qed_mcp_protocol_stats {
+ struct qed_mcp_lan_stats lan_stats;
+ struct qed_mcp_fcoe_stats fcoe_stats;
+ struct qed_mcp_iscsi_stats iscsi_stats;
+ struct qed_mcp_rdma_stats rdma_stats;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -426,6 +468,29 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM
+ * @param param - [0:23] - Offset, [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Size of the data returned by the MFW
+ * @param o_buf - Output buffer into which the MFW data is copied
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
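+
+/* A sketch of composing the param word per the encoding above (offset
+ * 0x1000, size 64; the units of the size field are an assumption):
+ *
+ *	u32 param = (0x1000 & 0xffffff) | (64 << 24);
+ */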
+
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
@@ -447,6 +512,26 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
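+
+/* qed_mcp_halt() and qed_mcp_resume() are intended to be used as a
+ * pair; a usage sketch (the work done in between is an assumption):
+ *
+ *	rc = qed_mcp_halt(p_hwfn, p_ptt);
+ *	if (!rc) {
+ *		... access resources normally owned by the MCP ...
+ *		rc = qed_mcp_resume(p_hwfn, p_ptt);
+ *	}
+ */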
+
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
@@ -458,6 +543,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
u8 min_bw);
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf);
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f6b86ca1ff79..b414a0542177 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -116,8 +116,14 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define TCFC_REG_WEAK_ENABLE_VF \
+ 0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL
+#define TCFC_REG_STRONG_ENABLE_VF \
+ 0x2d070cUL
+#define CCFC_REG_WEAK_ENABLE_VF \
+ 0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \
@@ -202,6 +208,26 @@
0x50196cUL
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE \
+ 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
+ 32
+#define NIG_REG_LLH_FUNC_FILTER_EN \
+ 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE \
+ 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
+ 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \
+ 0x501b40UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
+ 16
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
@@ -258,6 +284,8 @@
0x1f0a1cUL
#define PRS_REG_ROCE_DEST_QP_MAX_PF \
0x1f0430UL
+#define PRS_REG_USE_LIGHT_L2 \
+ 0x1f096cUL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -521,4 +549,910 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+
+#define PGLCS_REG_DBG_SELECT \
+ 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE \
+ 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT \
+ 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID \
+ 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME \
+ 0x001d24UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
+ 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 \
+ 0x008080UL
+#define MISC_REG_RESET_PL_PDA_VAUX \
+ 0x008090UL
+#define MISCS_REG_RESET_PL_UA \
+ 0x009050UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define DMAE_REG_DBG_SELECT \
+ 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE \
+ 0x00c514UL
+#define DMAE_REG_DBG_SHIFT \
+ 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID \
+ 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME \
+ 0x00c520UL
+#define NCSI_REG_DBG_SELECT \
+ 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE \
+ 0x040478UL
+#define NCSI_REG_DBG_SHIFT \
+ 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID \
+ 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME \
+ 0x040484UL
+#define GRC_REG_DBG_SELECT \
+ 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE \
+ 0x0500a8UL
+#define GRC_REG_DBG_SHIFT \
+ 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID \
+ 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME \
+ 0x0500b4UL
+#define UMAC_REG_DBG_SELECT \
+ 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE \
+ 0x051098UL
+#define UMAC_REG_DBG_SHIFT \
+ 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID \
+ 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME \
+ 0x0510a4UL
+#define MCP2_REG_DBG_SELECT \
+ 0x052400UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MCP2_REG_DBG_SHIFT \
+ 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID \
+ 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME \
+ 0x052444UL
+#define PCIE_REG_DBG_SELECT \
+ 0x0547e8UL
+#define PCIE_REG_DBG_DWORD_ENABLE \
+ 0x0547ecUL
+#define PCIE_REG_DBG_SHIFT \
+ 0x0547f0UL
+#define PCIE_REG_DBG_FORCE_VALID \
+ 0x0547f4UL
+#define PCIE_REG_DBG_FORCE_FRAME \
+ 0x0547f8UL
+#define DORQ_REG_DBG_SELECT \
+ 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE \
+ 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT \
+ 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID \
+ 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME \
+ 0x100ae0UL
+#define IGU_REG_DBG_SELECT \
+ 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE \
+ 0x18157cUL
+#define IGU_REG_DBG_SHIFT \
+ 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID \
+ 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME \
+ 0x181588UL
+#define CAU_REG_DBG_SELECT \
+ 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE \
+ 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT \
+ 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID \
+ 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME \
+ 0x1c0eb8UL
+#define PRS_REG_DBG_SELECT \
+ 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE \
+ 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT \
+ 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID \
+ 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME \
+ 0x1f0ba4UL
+#define CNIG_REG_DBG_SELECT_K2 \
+ 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+ 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 \
+ 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 \
+ 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+ 0x218264UL
+#define PRM_REG_DBG_SELECT \
+ 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE \
+ 0x2306acUL
+#define PRM_REG_DBG_SHIFT \
+ 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID \
+ 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME \
+ 0x2306b8UL
+#define SRC_REG_DBG_SELECT \
+ 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE \
+ 0x238704UL
+#define SRC_REG_DBG_SHIFT \
+ 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID \
+ 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME \
+ 0x238710UL
+#define RSS_REG_DBG_SELECT \
+ 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE \
+ 0x238c50UL
+#define RSS_REG_DBG_SHIFT \
+ 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID \
+ 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME \
+ 0x238c5cUL
+#define RPB_REG_DBG_SELECT \
+ 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE \
+ 0x23c72cUL
+#define RPB_REG_DBG_SHIFT \
+ 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID \
+ 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME \
+ 0x23c738UL
+#define PSWRQ2_REG_DBG_SELECT \
+ 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE \
+ 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT \
+ 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID \
+ 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME \
+ 0x240110UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE \
+ 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT \
+ 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID \
+ 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME \
+ 0x280030UL
+#define PSWWR_REG_DBG_SELECT \
+ 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE \
+ 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT \
+ 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID \
+ 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME \
+ 0x29a094UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD_REG_DBG_DWORD_ENABLE \
+ 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT \
+ 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID \
+ 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME \
+ 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT \
+ 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE \
+ 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT \
+ 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID \
+ 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME \
+ 0x29d410UL
+#define PSWHST2_REG_DBG_SELECT \
+ 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE \
+ 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT \
+ 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID \
+ 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME \
+ 0x29e068UL
+#define PSWHST_REG_DBG_SELECT \
+ 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE \
+ 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT \
+ 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID \
+ 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME \
+ 0x2a0110UL
+#define PGLUE_B_REG_DBG_SELECT \
+ 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE \
+ 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT \
+ 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID \
+ 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME \
+ 0x2a8410UL
+#define TM_REG_DBG_SELECT \
+ 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE \
+ 0x2c07acUL
+#define TM_REG_DBG_SHIFT \
+ 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID \
+ 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME \
+ 0x2c07b8UL
+#define TCFC_REG_DBG_SELECT \
+ 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE \
+ 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT \
+ 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID \
+ 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME \
+ 0x2d0510UL
+#define CCFC_REG_DBG_SELECT \
+ 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE \
+ 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT \
+ 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID \
+ 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME \
+ 0x2e0510UL
+#define QM_REG_DBG_SELECT \
+ 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE \
+ 0x2f2e78UL
+#define QM_REG_DBG_SHIFT \
+ 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID \
+ 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME \
+ 0x2f2e84UL
+#define RDIF_REG_DBG_SELECT \
+ 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE \
+ 0x300504UL
+#define RDIF_REG_DBG_SHIFT \
+ 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID \
+ 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME \
+ 0x300510UL
+#define TDIF_REG_DBG_SELECT \
+ 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE \
+ 0x310504UL
+#define TDIF_REG_DBG_SHIFT \
+ 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID \
+ 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME \
+ 0x310510UL
+#define BRB_REG_DBG_SELECT \
+ 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE \
+ 0x340ed4UL
+#define BRB_REG_DBG_SHIFT \
+ 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID \
+ 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME \
+ 0x340ee0UL
+#define XYLD_REG_DBG_SELECT \
+ 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE \
+ 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT \
+ 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID \
+ 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME \
+ 0x4c1610UL
+#define YULD_REG_DBG_SELECT \
+ 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE \
+ 0x4c9604UL
+#define YULD_REG_DBG_SHIFT \
+ 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID \
+ 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME \
+ 0x4c9610UL
+#define TMLD_REG_DBG_SELECT \
+ 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE \
+ 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT \
+ 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID \
+ 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME \
+ 0x4d1610UL
+#define MULD_REG_DBG_SELECT \
+ 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE \
+ 0x4e1604UL
+#define MULD_REG_DBG_SHIFT \
+ 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID \
+ 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME \
+ 0x4e1610UL
+#define NIG_REG_DBG_SELECT \
+ 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE \
+ 0x502144UL
+#define NIG_REG_DBG_SHIFT \
+ 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID \
+ 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME \
+ 0x502150UL
+#define BMB_REG_DBG_SELECT \
+ 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE \
+ 0x540a80UL
+#define BMB_REG_DBG_SHIFT \
+ 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID \
+ 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME \
+ 0x540a8cUL
+#define PTU_REG_DBG_SELECT \
+ 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE \
+ 0x560104UL
+#define PTU_REG_DBG_SHIFT \
+ 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID \
+ 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME \
+ 0x560110UL
+#define CDU_REG_DBG_SELECT \
+ 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE \
+ 0x580708UL
+#define CDU_REG_DBG_SHIFT \
+ 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID \
+ 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME \
+ 0x580714UL
+#define WOL_REG_DBG_SELECT \
+ 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE \
+ 0x600144UL
+#define WOL_REG_DBG_SHIFT \
+ 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID \
+ 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME \
+ 0x600150UL
+#define BMBN_REG_DBG_SELECT \
+ 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE \
+ 0x610144UL
+#define BMBN_REG_DBG_SHIFT \
+ 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID \
+ 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME \
+ 0x610150UL
+#define NWM_REG_DBG_SELECT \
+ 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE \
+ 0x8000f0UL
+#define NWM_REG_DBG_SHIFT \
+ 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID \
+ 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME \
+ 0x8000fcUL
+#define PBF_REG_DBG_SELECT \
+ 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE \
+ 0xd80064UL
+#define PBF_REG_DBG_SHIFT \
+ 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID \
+ 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME \
+ 0xd80070UL
+#define PBF_PB1_REG_DBG_SELECT \
+ 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE \
+ 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT \
+ 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID \
+ 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME \
+ 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT \
+ 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE \
+ 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT \
+ 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID \
+ 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME \
+ 0xda4738UL
+#define BTB_REG_DBG_SELECT \
+ 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE \
+ 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT \
+ 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID \
+ 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME \
+ 0xdb08d8UL
+#define XSDM_REG_DBG_SELECT \
+ 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE \
+ 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT \
+ 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID \
+ 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME \
+ 0xf80e38UL
+#define YSDM_REG_DBG_SELECT \
+ 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE \
+ 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT \
+ 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID \
+ 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME \
+ 0xf90e38UL
+#define PSDM_REG_DBG_SELECT \
+ 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE \
+ 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT \
+ 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID \
+ 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME \
+ 0xfa0e38UL
+#define TSDM_REG_DBG_SELECT \
+ 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE \
+ 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT \
+ 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID \
+ 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME \
+ 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT \
+ 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE \
+ 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT \
+ 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID \
+ 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME \
+ 0xfc0e38UL
+#define USDM_REG_DBG_SELECT \
+ 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE \
+ 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT \
+ 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID \
+ 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME \
+ 0xfd0e38UL
+#define XCM_REG_DBG_SELECT \
+ 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE \
+ 0x1000044UL
+#define XCM_REG_DBG_SHIFT \
+ 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID \
+ 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME \
+ 0x1000050UL
+#define YCM_REG_DBG_SELECT \
+ 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE \
+ 0x1080044UL
+#define YCM_REG_DBG_SHIFT \
+ 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID \
+ 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME \
+ 0x1080050UL
+#define PCM_REG_DBG_SELECT \
+ 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE \
+ 0x1100044UL
+#define PCM_REG_DBG_SHIFT \
+ 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID \
+ 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME \
+ 0x1100050UL
+#define TCM_REG_DBG_SELECT \
+ 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE \
+ 0x1180044UL
+#define TCM_REG_DBG_SHIFT \
+ 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID \
+ 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME \
+ 0x1180050UL
+#define MCM_REG_DBG_SELECT \
+ 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE \
+ 0x1200044UL
+#define MCM_REG_DBG_SHIFT \
+ 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID \
+ 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME \
+ 0x1200050UL
+#define UCM_REG_DBG_SELECT \
+ 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE \
+ 0x1280054UL
+#define UCM_REG_DBG_SHIFT \
+ 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID \
+ 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME \
+ 0x1280060UL
+#define XSEM_REG_DBG_SELECT \
+ 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE \
+ 0x140152cUL
+#define XSEM_REG_DBG_SHIFT \
+ 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID \
+ 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME \
+ 0x1401538UL
+#define YSEM_REG_DBG_SELECT \
+ 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE \
+ 0x150152cUL
+#define YSEM_REG_DBG_SHIFT \
+ 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID \
+ 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME \
+ 0x1501538UL
+#define PSEM_REG_DBG_SELECT \
+ 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE \
+ 0x160152cUL
+#define PSEM_REG_DBG_SHIFT \
+ 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID \
+ 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME \
+ 0x1601538UL
+#define TSEM_REG_DBG_SELECT \
+ 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE \
+ 0x170152cUL
+#define TSEM_REG_DBG_SHIFT \
+ 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID \
+ 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME \
+ 0x1701538UL
+#define MSEM_REG_DBG_SELECT \
+ 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE \
+ 0x180152cUL
+#define MSEM_REG_DBG_SHIFT \
+ 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID \
+ 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME \
+ 0x1801538UL
+#define USEM_REG_DBG_SELECT \
+ 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE \
+ 0x190152cUL
+#define USEM_REG_DBG_SHIFT \
+ 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID \
+ 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME \
+ 0x1901538UL
+#define PCIE_REG_DBG_COMMON_SELECT \
+ 0x054398UL
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+ 0x05439cUL
+#define PCIE_REG_DBG_COMMON_SHIFT \
+ 0x0543a0UL
+#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+ 0x0543a4UL
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+ 0x0543a8UL
+#define MISC_REG_RESET_PL_UA \
+ 0x008050UL
+#define MISC_REG_RESET_PL_HV \
+ 0x008060UL
+#define XCM_REG_CTX_RBC_ACCS \
+ 0x1001800UL
+#define XCM_REG_AGG_CON_CTX \
+ 0x1001804UL
+#define XCM_REG_SM_CON_CTX \
+ 0x1001808UL
+#define YCM_REG_CTX_RBC_ACCS \
+ 0x1081800UL
+#define YCM_REG_AGG_CON_CTX \
+ 0x1081804UL
+#define YCM_REG_AGG_TASK_CTX \
+ 0x1081808UL
+#define YCM_REG_SM_CON_CTX \
+ 0x108180cUL
+#define YCM_REG_SM_TASK_CTX \
+ 0x1081810UL
+#define PCM_REG_CTX_RBC_ACCS \
+ 0x1101440UL
+#define PCM_REG_SM_CON_CTX \
+ 0x1101444UL
+#define TCM_REG_CTX_RBC_ACCS \
+ 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX \
+ 0x11814c4UL
+#define TCM_REG_AGG_TASK_CTX \
+ 0x11814c8UL
+#define TCM_REG_SM_CON_CTX \
+ 0x11814ccUL
+#define TCM_REG_SM_TASK_CTX \
+ 0x11814d0UL
+#define MCM_REG_CTX_RBC_ACCS \
+ 0x1201800UL
+#define MCM_REG_AGG_CON_CTX \
+ 0x1201804UL
+#define MCM_REG_AGG_TASK_CTX \
+ 0x1201808UL
+#define MCM_REG_SM_CON_CTX \
+ 0x120180cUL
+#define MCM_REG_SM_TASK_CTX \
+ 0x1201810UL
+#define UCM_REG_CTX_RBC_ACCS \
+ 0x1281700UL
+#define UCM_REG_AGG_CON_CTX \
+ 0x1281704UL
+#define UCM_REG_AGG_TASK_CTX \
+ 0x1281708UL
+#define UCM_REG_SM_CON_CTX \
+ 0x128170cUL
+#define UCM_REG_SM_TASK_CTX \
+ 0x1281710UL
+#define XSEM_REG_SLOW_DBG_EMPTY \
+ 0x1401140UL
+#define XSEM_REG_SYNC_DBG_EMPTY \
+ 0x1401160UL
+#define XSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE \
+ 0x1401404UL
+#define XSEM_REG_DBG_FRAME_MODE \
+ 0x1401408UL
+#define XSEM_REG_DBG_MODE1_CFG \
+ 0x1401420UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x1440000UL
+#define YSEM_REG_SYNC_DBG_EMPTY \
+ 0x1501160UL
+#define YSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE \
+ 0x1501404UL
+#define YSEM_REG_DBG_FRAME_MODE \
+ 0x1501408UL
+#define YSEM_REG_DBG_MODE1_CFG \
+ 0x1501420UL
+#define YSEM_REG_FAST_MEMORY \
+ 0x1540000UL
+#define PSEM_REG_SLOW_DBG_EMPTY \
+ 0x1601140UL
+#define PSEM_REG_SYNC_DBG_EMPTY \
+ 0x1601160UL
+#define PSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE \
+ 0x1601404UL
+#define PSEM_REG_DBG_FRAME_MODE \
+ 0x1601408UL
+#define PSEM_REG_DBG_MODE1_CFG \
+ 0x1601420UL
+#define PSEM_REG_FAST_MEMORY \
+ 0x1640000UL
+#define TSEM_REG_SLOW_DBG_EMPTY \
+ 0x1701140UL
+#define TSEM_REG_SYNC_DBG_EMPTY \
+ 0x1701160UL
+#define TSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE \
+ 0x1701404UL
+#define TSEM_REG_DBG_FRAME_MODE \
+ 0x1701408UL
+#define TSEM_REG_DBG_MODE1_CFG \
+ 0x1701420UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1740000UL
+#define MSEM_REG_SLOW_DBG_EMPTY \
+ 0x1801140UL
+#define MSEM_REG_SYNC_DBG_EMPTY \
+ 0x1801160UL
+#define MSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE \
+ 0x1801404UL
+#define MSEM_REG_DBG_FRAME_MODE \
+ 0x1801408UL
+#define MSEM_REG_DBG_MODE1_CFG \
+ 0x1801420UL
+#define MSEM_REG_FAST_MEMORY \
+ 0x1840000UL
+#define USEM_REG_SLOW_DBG_EMPTY \
+ 0x1901140UL
+#define USEM_REG_SYNC_DBG_EMPTY \
+ 0x1901160UL
+#define USEM_REG_SLOW_DBG_ACTIVE \
+ 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE \
+ 0x1901404UL
+#define USEM_REG_DBG_FRAME_MODE \
+ 0x1901408UL
+#define USEM_REG_DBG_MODE1_CFG \
+ 0x1901420UL
+#define USEM_REG_FAST_MEMORY \
+ 0x1940000UL
+#define SEM_FAST_REG_INT_RAM \
+ 0x020000UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
+#define GRC_REG_TRACE_FIFO_VALID_DATA \
+ 0x050064UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
+ 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \
+ 0x050500UL
+#define IGU_REG_ERROR_HANDLING_MEMORY \
+ 0x181520UL
+#define MCP_REG_CPU_MODE \
+ 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT \
+ (0x1 << 10)
+#define BRB_REG_BIG_RAM_ADDRESS \
+ 0x340800UL
+#define BRB_REG_BIG_RAM_DATA \
+ 0x341500UL
+#define SEM_FAST_REG_STALL_0 \
+ 0x000488UL
+#define SEM_FAST_REG_STALLED \
+ 0x000494UL
+#define BTB_REG_BIG_RAM_ADDRESS \
+ 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA \
+ 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS \
+ 0x540800UL
+#define BMB_REG_BIG_RAM_DATA \
+ 0x540f00UL
+#define SEM_FAST_REG_STORM_REG_FILE \
+ 0x008000UL
+#define RSS_REG_RSS_RAM_ADDR \
+ 0x238c30UL
+#define MISCS_REG_BLOCK_256B_EN \
+ 0x009074UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
+#define MCP_REG_CPU_REG_FILE \
+ 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE \
+ 32
+#define DBG_REG_DEBUG_TARGET \
+ 0x01005cUL
+#define DBG_REG_FULL_MODE \
+ 0x010060UL
+#define DBG_REG_CALENDAR_OUT_DATA \
+ 0x010480UL
+#define GRC_REG_TRACE_FIFO \
+ 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID \
+ 0x181530UL
+#define DBG_REG_DBG_BLOCK_ON \
+ 0x010454UL
+#define DBG_REG_FRAMING_MODE \
+ 0x010058UL
+#define SEM_FAST_REG_VFC_DATA_WR \
+ 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR \
+ 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD \
+ 0x000b48UL
+#define RSS_REG_RSS_RAM_DATA \
+ 0x238c20UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define NWS_REG_NWS_CMU \
+ 0x720000UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+ 0x000680UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+ 0x000684UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+ 0x0006c0UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+ 0x0006c4UL
+#define MS_REG_MS_CMU \
+ 0x6a4000UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_REG_PHY0 \
+ 0x620000UL
+#define PHY_PCIE_REG_PHY1 \
+ 0x624000UL
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644
index 000000000000..23430059471c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -0,0 +1,2954 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_roce.h"
+#include "qed_ll2.h"
+
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
+ p_eqe->opcode, &p_eqe->data);
+}
+
+static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap) {
+ DP_NOTICE(p_hwfn,
+ "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
+ return -ENOMEM;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
+ bmap->bitmap);
+ return 0;
+}
+
+static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
+
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+
+ if (*id_num >= bmap->max_count) {
+ DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
+ bmap->max_count);
+ return -EINVAL;
+ }
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ return 0;
+}
+
+static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ return;
+ }
+}
+
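+/* Illustrative sketch (editorial, not part of this patch): the typical
+ * lifecycle of an id taken from a qed_bmap set up by qed_rdma_bmap_alloc().
+ * Allocations and releases are serialized by the rdma_info lock, as done
+ * by the callers later in this file.
+ */
+static __maybe_unused int qed_rdma_bmap_example(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap)
+{
+ u32 id;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ return rc; /* bitmap exhausted */
+
+ /* ... use id as a PD/DPI/CQ/TID index ... */
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ return 0;
+}
+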
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* The first sb id for RoCE comes after all the L2 SBs */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
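+/* Editorial example: with FEAT_NUM(p_hwfn, QED_PF_L2_QUE) == 8, relative
+ * CNQ sb 0 maps to absolute status block 8, sb 1 to 9, and so on.
+ */
+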
+u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
+{
+ return QED_CAU_DEF_RX_TIMER_RES;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+
+ p_rdma_info->num_qps = num_cons / 2;
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
+ rc);
+ goto free_rdma_info;
+ }
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
+ rc);
+ goto free_rdma_dev;
+ }
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for toggle bit for cq icids
+ * We toggle the bit every time we create or resize cq for a given icid.
+ * The maximum number of CQs is bounded to twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toogle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ kfree(p_rdma_info->cid_map.bitmap);
+ kfree(p_rdma_info->tid_map.bitmap);
+ kfree(p_rdma_info->toggle_bits.bitmap);
+ kfree(p_rdma_info->cq_map.bitmap);
+ kfree(p_rdma_info->dpi_map.bitmap);
+ kfree(p_rdma_info->pd_map.bitmap);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
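+ /* Modified EUI-64 derivation: flip the universal/local bit of the
+ * MAC address and splice 0xff, 0xfe between its OUI and device
+ * specific halves.
+ */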
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than QED_ROCE_MAX_QPS because
+ * it is up-aligned to 16 and then to the ILT page size within qed
+ * cxt. This is fine in terms of ILT, but we don't want to
+ * configure the FW beyond its capabilities.
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs use the same icids as QPs, so they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ: the CQEs reside in a two-level
+ * PBL, where 8 is the PBL pointer size in bytes and 32 is the size
+ * of a CQ element in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
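+ /* The largest message is bounded both by what a single WQE can
+ * reference (rdma_max_sge SGEs of up to max_mr_mw_fmr_size bytes
+ * each) and by the 2GB (2^31 bytes) RDMA message-size limit.
+ */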
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+ /* We delay enabling RoCE search (via rdma_prs_search_reg) until
+ * the first cid is allocated. See qed_cxt_dynamic_ilt_alloc() for
+ * more details.
+ */
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+ p_cnq_params->sb_num =
+ cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the Kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for the "reserved MR". The driver
+ * should allocate memory for it so it can be loaded, but no ramrod
+ * should be posted for it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ return p_port;
+}
+
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_RDMA_TYPE_ROCE;
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* The function toggles the bit related to the given icid and
+ * returns the new value of the bit.
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
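+/* Editorial sketch of the intended semantics: the first create on a fresh
+ * icid finds the bit clear and reports toggle_bit = 1; the next resize on
+ * the same icid flips it and reports 0. The alternating value lets the
+ * firmware distinguish CQEs produced before and after each create/resize.
+ */
+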
+int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params, u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_rdma_resize_cq(void *rdma_cxt,
+ struct qed_rdma_resize_cq_in_params *in_params,
+ struct qed_rdma_resize_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_resize_cq_output_params *p_ramrod_res;
+ struct rdma_resize_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ u8 fw_return_code;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_resize_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed resize cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_RESIZE_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_resize_cq;
+
+ p_ramrod->flags = 0;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
+ in_params->icid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
+ in_params->pbl_two_level);
+
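+ /* The FW takes the PBL page size as a log relative to a 4KB base
+ * page, hence the subtraction of 12 (log2(4096)).
+ */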
+ p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
+ p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
+ p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ goto err;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
+ out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
+
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+ DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_destroy_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cq_map,
+ in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
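+ /* Editorial example: MAC 00:11:22:33:44:55 becomes the 16-bit
+ * words 0x0011, 0x2233, 0x4455 prior to the little-endian
+ * conversion.
+ */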
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+ __le32 *dst_gid)
+{
+ u32 i;
+
+ if (qp->roce_mode == ROCE_V2_IPV4) {
+ /* The IPv4 addresses shall be aligned to the highest word.
+ * The lower words must be zero.
+ */
+ memset(src_gid, 0, sizeof(union qed_gid));
+ memset(dst_gid, 0, sizeof(union qed_gid));
+ src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
+ dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
+ } else {
+ /* GIDs and IPv6 addresses coincide in location and size */
+ for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
+ src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
+ dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
+ }
+ }
+}
+
+static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+{
+ enum roce_flavor flavor;
+
+ switch (roce_mode) {
+ case ROCE_V1:
+ flavor = PLAIN_ROCE;
+ break;
+ case ROCE_V2_IPV4:
+ flavor = RROCE_IPV4;
+ break;
+ case ROCE_V2_IPV6:
+ flavor = RROCE_IPV6;
+ break;
+ default:
+ flavor = MAX_ROCE_FLAVOR;
+ break;
+ }
+ return flavor;
+}
+
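+/* Editorial note: PLAIN_ROCE corresponds to RoCE v1 (raw Ethernet frames,
+ * Ethertype 0x8915), while the RROCE_* flavors are routable RoCE v2
+ * carried over UDP/IPv4 or UDP/IPv6 (UDP destination port 4791).
+ */
+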
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 responder_icid;
+ u32 requester_icid;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &responder_icid);
+ if (rc) {
+ spin_unlock_bh(&p_rdma_info->lock);
+ return rc;
+ }
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ if (rc)
+ goto err;
+
+ /* The two icids should be adjacent */
+ if ((requester_icid - responder_icid) != 1) {
+ DP_NOTICE(p_hwfn, "Failed to allocate two adjacent cids\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+ requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+
+ /* If these icids require a new ILT line, allocate a DMA-able
+ * context for an ILT page
+ */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
+ if (rc)
+ goto err;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
+ if (rc)
+ goto err;
+
+ *cid = (u16)responder_icid;
+ return rc;
+
+err:
+ spin_lock_bh(&p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Allocate CID - failed, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for IRQ */
+ qp->irq_num_pages = 1;
+ qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->irq_phys_addr, GFP_KERNEL);
+ if (!qp->irq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->irq_num_pages = qp->irq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->rq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
+ rc, physical_queue0);
+
+ if (rc)
+ goto err;
+
+ qp->resp_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ return rc;
+}
+
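+/* Editorial note: a RoCE QP spans two adjacent CIDs allocated together in
+ * qed_roce_alloc_cid() - the responder above rides on qp->icid, while the
+ * requester below uses qp->icid + 1.
+ */
+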
+static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for ORQ */
+ qp->orq_num_pages = 1;
+ qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->orq_phys_addr, GFP_KERNEL);
+ if (!qp->orq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->orq_num_pages = qp->orq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->sq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+
+ if (rc)
+ goto err;
+
+ qp->req_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+ return rc;
+}
+
+static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_sqd,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !(qp->req_offloaded))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+ qp->sqd_async);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_invalidated_mw)
+{
+ struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+ struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
+
+ p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+
+ if (!p_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+
+ /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ qp->resp_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct roce_destroy_qp_resp_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_bound_mw)
+{
+ struct roce_destroy_qp_req_output_params *p_ramrod_res;
+ struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->req_offloaded)
+ return 0;
+
+ p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy requester failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
+
+ /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+
+ qp->req_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
+ struct roce_query_qp_req_output_params *p_req_ramrod_res;
+ struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
+ struct roce_query_qp_req_ramrod_data *p_req_ramrod;
+ struct qed_sp_init_data init_data;
+ dma_addr_t resp_ramrod_res_phys;
+ dma_addr_t req_ramrod_res_phys;
+ struct qed_spq_entry *p_ent;
+ bool rq_err_state;
+ bool sq_err_state;
+ bool sq_draining;
+ int rc = -ENOMEM;
+
+ if (!qp->resp_offloaded && !qp->req_offloaded) {
+ /* We can't send ramrod to the fw since this qp wasn't offloaded
+ * to the fw yet
+ */
+ out_params->draining = false;
+ out_params->rq_psn = qp->rq_psn;
+ out_params->sq_psn = qp->sq_psn;
+ out_params->state = qp->cur_state;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
+ return 0;
+ }
+
+ if (!qp->resp_offloaded) {
+ DP_NOTICE(p_hwfn,
+ "The responder's qp should be offloaded before the requester's\n");
+ return -EINVAL;
+ }
+
+ /* Send a query responder ramrod to FW to get RQ-PSN and state */
+ p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
+ if (!p_resp_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_resp;
+
+ p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
+ DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_resp;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
+ out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
+ if (!qp->req_offloaded) {
+ /* Don't send query qp for the requester */
+ out_params->sq_psn = qp->sq_psn;
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+
+ out_params->state = qp->cur_state;
+
+ return 0;
+ }
+
+ /* Send a query requester ramrod to FW to get SQ-PSN and state */
+ p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_req_ramrod_res),
+ &req_ramrod_res_phys,
+ GFP_KERNEL);
+ if (!p_req_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ init_data.cid = qp->icid + 1;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_req;
+
+ p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
+ DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_req;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
+ out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+ sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+ sq_draining =
+ GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+ else if (sq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_SQE;
+ else if (sq_draining)
+ out_params->draining = true;
+ out_params->state = qp->cur_state;
+
+ return 0;
+
+err_req:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+ return rc;
+err_resp:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+ return rc;
+}
+
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u32 num_invalidated_mw = 0;
+ u32 num_bound_mw = 0;
+ u32 start_cid;
+ int rc;
+
+ /* Destroys the specified QP */
+ if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
+ DP_NOTICE(p_hwfn,
+ "QP must be in error, reset or init state before destroying it\n");
+ return -EINVAL;
+ }
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ /* Send destroy requester ramrod */
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
+ if (rc)
+ return rc;
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+
+ /* Release responder's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid - start_cid);
+
+ /* Release requester's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid + 1 - start_cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return 0;
+}
+
+int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are taken from the qp struct rather than from
+ * the FW, as the FW cannot modify them
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt)
+ return NULL;
+
+ if (!in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
+ return NULL;
+ }
+
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ qp->qpid = ((0xFF << 16) | qp->icid);
+
+ DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
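+ /* E2E flow control cannot be used together with an SRQ */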
+ qp->e2e_flow_control_en = !qp->use_srq;
+ qp->stats_queue = in_params->stats_queue;
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
+static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ u32 num_invalidated_mw = 0, num_bound_mw = 0;
+ int rc = 0;
+
+ /* Perform additional operations according to the current state and the
+ * next state
+ */
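+ /* The responder (RQ) and requester (SQ) sides are offloaded to FW by
+ * separate ramrods, so each transition may drive one or both of them.
+ */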
+ if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
+ (prev_state == QED_ROCE_QP_STATE_RESET)) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
+ /* Init->RTR or Reset->RTR */
+ rc = qed_roce_sp_create_responder(p_hwfn, qp);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTR-> RTS */
+ rc = qed_roce_sp_create_requester(p_hwfn, qp);
+ if (rc)
+ return rc;
+
+ /* Send modify responder ramrod */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTS->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* RTS->SQD */
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* SQD->SQD */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* SQD->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
+ qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ /* ->ERR */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
+ params->modify_flags);
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
+ /* Any state -> RESET */
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+ &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+ &num_bound_mw);
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ }
+
+ return rc;
+}
+
+int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
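+ /* Track the highest TID registered; it is passed to FW as MAX_ID below */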
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
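+ /* Page sizes are passed to FW as log2 relative to 4KB (hence the "- 12") */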
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr)
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
+ p_hwfn->p_rdma_info->last_tid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+ /* Lower 32 bits of the registered MR address.
+ * In case of zero based MR, will hold FBO
+ */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+ /* The TID is still in use; a NIG drain is required
+ * before the ramrod can be sent again
+ */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return -EINVAL;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_roce_ll2_packet *packet = cookie;
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+
+ roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+}
+
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
+ cookie, first_frag_addr,
+ b_last_fragment, b_last_packet);
+}
+
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet)
+{
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+ struct qed_roce_ll2_rx_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_roce_ll2_packet pkt;
+
+ DP_VERBOSE(cdev,
+ QED_MSG_LL2,
+ "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
+ (void *)(uintptr_t)rx_buf_addr,
+ data_length, data_length_error);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.n_seg = 1;
+ pkt.payload[0].baddr = rx_buf_addr;
+ pkt.payload[0].len = data_length;
+
+ memset(&params, 0, sizeof(params));
+ params.vlan_id = vlan;
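+ /* Rebuild the source MAC from the two big-endian words passed by FW */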
+ *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
+ *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
+
+ if (data_length_error) {
+ DP_ERR(cdev,
+ "roce ll2 rx complete: data length error %d, length=%d\n",
+ data_length_error, data_length);
+ params.rc = -EINVAL;
+ }
+
+ roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev,
+ "qed roce mac filter failed - roce_info/ll2 NULL\n");
+ return -EINVAL;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwfn->ll2->lock);
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ new_mac_address);
+ mutex_unlock(&hwfn->ll2->lock);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add mac filter\n");
+
+ return rc;
+}
+
+static int qed_roce_ll2_start(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2;
+ struct qed_ll2_info ll2_params;
+ int rc;
+
+ if (!params) {
+ DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
+ return -EINVAL;
+ }
+ if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
+ params->cbs.tx_cb, params->cbs.rx_cb);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(params->mac_address)) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
+ params->mac_address);
+ return -EINVAL;
+ }
+
+ /* Initialize */
+ roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
+ if (!roce_ll2) {
+ DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
+ return -ENOMEM;
+ }
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+ roce_ll2->cbs = params->cbs;
+ roce_ll2->cb_cookie = params->cb_cookie;
+ mutex_init(&roce_ll2->lock);
+
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.conn_type = QED_LL2_TYPE_ROCE;
+ ll2_params.mtu = params->mtu;
+ ll2_params.rx_drop_ttl0_flg = true;
+ ll2_params.rx_vlan_removal_en = false;
+ ll2_params.tx_dest = CORE_TX_DEST_NW;
+ ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
+ ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
+ ll2_params.gsi_enable = true;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
+ params->max_rx_buffers,
+ params->max_tx_buffers,
+ &roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
+ rc);
+ goto err1;
+ }
+
+ hwfn->ll2 = roce_ll2;
+
+ rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
+ if (rc) {
+ hwfn->ll2 = NULL;
+ goto err2;
+ }
+ ether_addr_copy(roce_ll2->mac_address, params->mac_address);
+
+ return 0;
+
+err2:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err1:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err:
+ kfree(roce_ll2);
+ return rc;
+}
+
+static int qed_roce_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_roce_ll2_info *roce_ll2;
+ int rc;
+
+ if (!cdev)
+ return -EINVAL;
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ roce_ll2 = hwfn->ll2;
+ if (!roce_ll2 || roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
+ return -EINVAL;
+ }
+
+ /* remove LL2 MAC address filter */
+ rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
+ eth_zero_addr(roce_ll2->mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
+ rc);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+ hwfn->ll2 = NULL;
+ kfree(roce_ll2);
+
+ return rc;
+}
+
+static int qed_roce_ll2_tx(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_tx_params *params)
+{
+ struct qed_roce_ll2_info *roce_ll2;
+ enum qed_ll2_roce_flavor_type qed_roce_flavor;
+ u8 flags = 0;
+ int rc;
+ int i;
+
+ if (!cdev)
+ return -EINVAL;
+
+ if (!pkt || !params) {
+ DP_ERR(cdev,
+ "roce ll2 tx: failed tx because one of the following is NULL - pkt=%p, params=%p\n",
+ pkt, params);
+ return -EINVAL;
+ }
+
+ roce_ll2 = QED_LEADING_HWFN(cdev)->ll2;
+
+ qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
+ : QED_LL2_RROCE;
+
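+ /* RoCEv2 over IPv4 includes an IP header, so request IP csum offload */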
+ if (pkt->roce_mode == ROCE_V2_IPV4)
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ /* Tx header */
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
+ 1 + pkt->n_seg, 0, flags, 0,
+ qed_roce_flavor, pkt->header.baddr,
+ pkt->header.len, pkt, 1);
+ if (rc) {
+ DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_HEAD_FAILURE;
+ }
+
+ /* Tx payload */
+ for (i = 0; i < pkt->n_seg; i++) {
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle,
+ pkt->payload[i].baddr,
+ pkt->payload[i].len);
+ if (rc) {
+ /* Not much can be done on failure: part of the packet
+ * has already been posted, so the memory can't be freed
+ * until the completion arrives
+ */
+ DP_ERR(cdev,
+ "roce ll2 tx: payload failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_FRAG_FAILURE;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw)
+{
+ return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->ll2->handle,
+ buf->baddr, buf->len,
+ (void *)(uintptr_t)cookie, notify_fw);
+}
+
+static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle, stats);
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .roce_ll2_start = &qed_roce_ll2_start,
+ .roce_ll2_stop = &qed_roce_ll2_stop,
+ .roce_ll2_tx = &qed_roce_ll2_tx,
+ .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
+ .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .roce_ll2_stats = &qed_roce_ll2_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644
index 000000000000..2f091e8a0f40
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -0,0 +1,216 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_H
+#define _QED_ROCE_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_ll2.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ enum protocol_type proto;
+};
+
+struct qed_rdma_resize_cq_in_params {
+ u16 icid;
+ u32 cq_size;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+};
+
+struct qed_rdma_resize_cq_out_params {
+ u32 prod;
+ u32 cons;
+};
+
+struct qed_rdma_resize_cnq_in_params {
+ u32 cnq_id;
+ u32 pbl_page_size_log;
+ u64 pbl_ptr;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+ /* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+};
+
+int
+qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params);
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
+void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
+int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params);
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
+int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
+int qed_rdma_stop(void *rdma_cxt);
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
+u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe);
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
+int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params);
+int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index a342bfe4280d..9b7678f26909 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -2,6 +2,7 @@
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_sp.h"
+#include "qed_selftest.h"
int qed_selftest_memory(struct qed_dev *cdev)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index a548504c3420..652c90819758 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -61,6 +61,10 @@ union ramrod_data {
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
struct vport_filter_update_ramrod_data vport_filter_update;
struct rdma_init_func_ramrod_data rdma_init_func;
@@ -81,6 +85,7 @@ union ramrod_data {
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+ struct roce_init_func_ramrod_data roce_init_func;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a52f3fc051f5..2888eb0628f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -25,9 +25,7 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u8 cmd,
- u8 protocol,
- struct qed_sp_init_data *p_data)
+ u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
@@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
rc = qed_spq_get_entry(p_hwfn, pp_ent);
- if (rc != 0)
+ if (rc)
return rc;
p_ent = *pp_ent;
@@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
- PROTOCOLID_COMMON,
- &init_data);
+ PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
@@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
- qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
- &p_ramrod->tunnel_config);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
@@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index,
- p_ramrod->outer_tag);
+ sb, sb_index, p_ramrod->outer_tag);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index d73456eab1d7..caff41544898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,6 +28,9 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#include "qed_roce.h"
+#endif
/***************************************************************************
* Structures & Definitions
@@ -41,8 +44,7 @@
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
- union event_ring_data *data,
- u8 fw_return_code)
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_spq_comp_done *comp_done;
@@ -109,9 +111,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
-static int
-qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
{
p_ent->flags = 0;
@@ -189,8 +190,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
- struct qed_spq *p_spq,
- struct qed_spq_entry *p_ent)
+ struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
u16 echo = qed_chain_get_prod_idx(p_chain);
@@ -240,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ case PROTOCOLID_ROCE:
+ qed_async_roce_event(p_hwfn, p_eqe);
+ return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -255,8 +260,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
/***************************************************************************
* EQ API
***************************************************************************/
-void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
- u16 prod)
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
@@ -267,9 +271,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
mmiowb();
}
-int qed_eq_completion(struct qed_hwfn *p_hwfn,
- void *cookie)
-
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
struct qed_eq *p_eq = cookie;
struct qed_chain *p_chain = &p_eq->chain;
@@ -323,17 +325,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem)
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
- if (!p_eq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ if (!p_eq)
return NULL;
- }
/* Allocate and initialize EQ chain*/
if (qed_chain_alloc(p_hwfn->cdev,
@@ -342,17 +341,12 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
- &p_eq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ &p_eq->chain))
goto eq_allocate_fail;
- }
/* register EQ completion on the SP SB */
- qed_int_register_cb(p_hwfn,
- qed_eq_completion,
- p_eq,
- &p_eq->eq_sb_index,
- &p_eq->p_fw_cons);
+ qed_int_register_cb(p_hwfn, qed_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
return p_eq;
@@ -361,14 +355,12 @@ eq_allocate_fail:
return NULL;
}
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
qed_chain_reset(&p_eq->chain);
}
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
if (!p_eq)
return;
@@ -379,10 +371,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
-static int qed_cqe_completion(
- struct qed_hwfn *p_hwfn,
- struct eth_slow_path_rx_cqe *cqe,
- enum protocol_type protocol)
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
{
if (IS_VF(p_hwfn->cdev))
return 0;
@@ -463,12 +454,9 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
u32 capacity;
/* SPQ struct */
- p_spq =
- kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
- if (!p_spq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ if (!p_spq)
return -ENOMEM;
- }
/* SPQ ring */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -477,18 +465,14 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
- &p_spq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ &p_spq->chain))
goto spq_allocate_fail;
- }
/* allocate and fill the SPQ elements (incl. ramrod data list) */
capacity = qed_chain_get_capacity(&p_spq->chain);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- capacity *
- sizeof(struct qed_spq_entry),
+ capacity * sizeof(struct qed_spq_entry),
&p_phys, GFP_KERNEL);
-
if (!p_virt)
goto spq_allocate_fail;
@@ -525,9 +509,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
kfree(p_spq);
}
-int
-qed_spq_get_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry **pp_ent)
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -538,14 +520,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
if (list_empty(&p_spq->free_pool)) {
p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
if (!p_ent) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = -ENOMEM;
goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_ent->list);
p_ent->queue = &p_spq->pending;
}
@@ -564,8 +547,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
-void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
spin_lock_bh(&p_hwfn->p_spq->lock);
__qed_spq_return_entry(p_hwfn, p_ent);
@@ -586,10 +568,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
*
* @return int
*/
-static int
-qed_spq_add_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- enum spq_priority priority)
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -604,8 +585,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_en2;
p_en2 = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_en2->list);
/* Copy the ring element physical pointer to the new
@@ -655,8 +635,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
- struct list_head *head,
- u32 keep_reserve)
+ struct list_head *head, u32 keep_reserve)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
int rc;
@@ -690,8 +669,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
break;
p_ent = list_first_entry(&p_spq->unlimited_pending,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
if (!p_ent)
return -EINVAL;
@@ -705,8 +683,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- u8 *fw_return_code)
+ struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
@@ -803,8 +780,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return -EINVAL;
spin_lock_bh(&p_spq->lock);
- list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
- list) {
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
@@ -846,15 +822,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
if (!found) {
DP_NOTICE(p_hwfn,
- "Failed to find an entry this EQE completes\n");
+ "Failed to find an entry this EQE [echo %04x] completes\n",
+ le16_to_cpu(echo));
return -EEXIST;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ le16_to_cpu(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -878,10 +861,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
- if (!p_consq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ if (!p_consq)
return NULL;
- }
/* Allocate and initialize EQ chain*/
if (qed_chain_alloc(p_hwfn->cdev,
@@ -889,10 +870,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+ 0x80, &p_consq->chain))
goto consq_allocate_fail;
- }
return p_consq;
@@ -901,14 +880,12 @@ consq_allocate_fail:
return NULL;
}
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
qed_chain_reset(&p_consq->chain);
}
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
if (!p_consq)
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 15399da268d9..d2d6621fe0e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
}
fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
- if (fp_minor > ETH_HSI_VER_MINOR) {
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
@@ -107,8 +108,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only)
+static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -185,8 +186,8 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
-int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
- int vfid, struct qed_ptt *p_ptt)
+static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
{
struct qed_bulletin_content *p_bulletin;
int crc_size = sizeof(p_bulletin->crc);
@@ -454,10 +455,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
}
p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
- if (!p_sriov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ if (!p_sriov)
return -ENOMEM;
- }
p_hwfn->pf_iov_info = p_sriov;
@@ -506,10 +505,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
/* Allocate a new struct for IOV information */
cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
- if (!cdev->p_iov_info) {
- DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ if (!cdev->p_iov_info)
return -ENOMEM;
- }
+
cdev->p_iov_info->pos = pos;
rc = qed_iov_pci_cfg_info(cdev);
@@ -575,7 +573,7 @@ static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
}
}
-void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
u16 i;
@@ -699,7 +697,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
&qzone_id);
reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
- val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
qed_wr(p_hwfn, p_ptt, reg_addr, val);
}
}
@@ -1090,13 +1088,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
- if (!(tlvs_mask & (1 << i)))
+ if (!(tlvs_mask & BIT(i)))
continue;
resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
qed_iov_vport_to_tlv(p_hwfn, i), size);
- if (tlvs_accepted & (1 << i))
+ if (tlvs_accepted & BIT(i))
resp->hdr.status = status;
else
resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1132,9 +1130,10 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
-struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
- u16 relative_vf_id,
- bool b_enabled_only)
+static struct
+qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
{
struct qed_vf_info *vf = NULL;
@@ -1145,7 +1144,7 @@ struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
return &vf->p_vf_info;
}
-void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
struct qed_public_vf_info *vf_info;
@@ -1241,6 +1240,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
return PFVF_STATUS_NO_RESOURCE;
}
@@ -1280,22 +1289,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
+ /* Write the PF version so that VF would know which version
+ * is supported - might be overridden later. This guarantees that
+ * VF could recognize legacy PF based on lack of versions in reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
- DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
- vf->abs_vf_id,
- req->vfdev_info.eth_fp_hsi_major,
- req->vfdev_info.eth_fp_hsi_minor,
- ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
-
- /* Write the PF version so that VF would know which version
- * is supported.
- */
- pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
- pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
- goto out;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
}
/* On 100g PFs, prevent old VFs from loading */
@@ -1334,8 +1363,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
- pfdev_info->minor_fp_hsi = min_t(u8,
- ETH_HSI_VER_MINOR,
+
+ /* Incorrect when legacy, but doesn't matter as legacy isn't reading
+ * this field.
+ */
+ pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
@@ -1438,14 +1470,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
filter.type = QED_FILTER_VLAN;
filter.vlan = p_vf->shadow_config.vlans[i].vid;
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = qed_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- QED_SPQ_MODE_CB, NULL);
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to configure VLAN [%04x] to VF [%04x]\n",
@@ -1463,7 +1492,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ if ((events & BIT(VLAN_ADDR_FORCED)) &&
!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
@@ -1479,7 +1508,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if (events & BIT(MAC_ADDR_FORCED)) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1502,7 +1531,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
- if (events & (1 << VLAN_ADDR_FORCED)) {
+ if (events & BIT(VLAN_ADDR_FORCED)) {
struct qed_sp_vport_update_params vport_update;
u8 removal;
int i;
@@ -1572,7 +1601,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (filter.vlan)
p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
else
- p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
}
/* If forced features are terminated, we need to configure the shadow
@@ -1619,8 +1648,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
- vf->igu_sbs[sb_id],
- vf->abs_vf_id, 1);
+ vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
@@ -1632,7 +1660,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
* vfs that would still be fine, since they passed '0' as padding].
*/
p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
- if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
u8 vf_req = start->only_untagged;
vf_info->bulletin.p_virt->default_only_untagged = vf_req;
@@ -1650,9 +1678,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
+ params.check_mac = true;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc);
status = PFVF_STATUS_FAILURE;
@@ -1679,7 +1708,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->spoof_chk = false;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
rc);
status = PFVF_STATUS_FAILURE;
@@ -1695,21 +1724,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_vf_info *vf, u8 status)
+ struct qed_vf_info *vf,
+ u8 status, bool b_legacy)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
struct vfpf_start_rxq_tlv *req;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
req = &mbx->req_virt->start_rxq;
p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
offsetof(struct mstorm_vf_zone,
@@ -1717,7 +1757,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
sizeof(struct eth_rx_prod_data) * req->rx_qid;
}
- qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
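The response-length logic above (and its twin in the Tx path further down) keeps replies to legacy clients at the old, smaller size, since those clients hard-coded it. A compilable toy model of the size selection; the struct contents are made up:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the two reply layouts; sizes are illustrative. */
struct pfvf_def_resp_tlv { unsigned char hdr[16]; };
struct pfvf_start_queue_resp_tlv {
	struct pfvf_def_resp_tlv hdr;
	unsigned int offset;		/* extra field new clients read */
};

/* Hypothetical model: legacy clients assume the old reply size,
 * so the PF must not send them the larger struct.
 */
static size_t resp_len(int b_legacy)
{
	return b_legacy ? sizeof(struct pfvf_def_resp_tlv)
			: sizeof(struct pfvf_start_queue_resp_tlv);
}

int main(void)
{
	printf("legacy=%zu modern=%zu\n", resp_len(1), resp_len(0));
	return 0;
}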
@@ -1728,6 +1768,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
+ bool b_legacy_vf = false;
int rc;
memset(&params, 0, sizeof(params));
@@ -1743,13 +1784,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
+ /* Legacy VFs have their producers in a different location, which they
+ * calculate on their own and clean prior to this.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ b_legacy_vf = true;
+ } else {
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+ }
+
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
&params,
vf->abs_vf_id + 0x10,
req->bd_max_bytes,
req->rxq_addr,
- req->cqe_pbl_addr, req->cqe_pbl_size);
+ req->cqe_pbl_addr, req->cqe_pbl_size,
+ b_legacy_vf);
if (rc) {
status = PFVF_STATUS_FAILURE;
@@ -1760,7 +1815,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
}
out:
- qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
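The Rx-queue hunk above splits producer-cleanup responsibility: the PF zeroes the Rx producer only for modern VFs, since legacy VFs already cleaned their own. A small userspace model of that ownership rule, with hypothetical names (pf_start_rxq, mstorm_prod) standing in for the register write:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the MSTORM producer zone. */
static uint32_t mstorm_prod[8];

/* Hypothetical model: for modern VFs the PF zeroes the queue's Rx
 * producer before starting the queue; legacy VFs did this on their
 * own side, so the PF must leave the location alone.
 */
static void pf_start_rxq(unsigned int qid, bool b_legacy_vf)
{
	if (!b_legacy_vf)
		mstorm_prod[qid] = 0;
	/* ... the ramrod to start the queue would follow here ... */
}

int main(void)
{
	mstorm_prod[2] = 0xdeadbeef;
	pf_start_rxq(2, true);			/* untouched for legacy */
	printf("legacy: %#x\n", (unsigned)mstorm_prod[2]);
	pf_start_rxq(2, false);			/* cleared for modern */
	printf("modern: %#x\n", (unsigned)mstorm_prod[2]);
	return 0;
}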
@@ -1769,23 +1824,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
u16 qid = mbx->req_virt->start_txq.tx_qid;
- p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
+ p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
}
- qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
@@ -2045,7 +2115,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
/* Ignore the VF request if we're forcing a vlan */
- if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
p_data->update_inner_vlan_removal_flg = 1;
p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
}
@@ -2340,7 +2410,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
/* In forced mode, we're willing to remove entries - but we don't add
* new ones.
*/
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
return 0;
if (p_params->opcode == QED_FILTER_ADD ||
@@ -2374,7 +2444,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
int i;
/* If we're in forced-mode, we don't allow any change */
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
/* First remove entries and then add new ones */
@@ -2441,8 +2511,8 @@ qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
- int vfid, struct qed_filter_ucast *params)
+static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
{
struct qed_public_vf_info *vf;
@@ -2509,7 +2579,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
}
/* Determine if the unicast filtering is acceptable to the PF */
- if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
/* Once VLAN is forced or PVID is set, do not allow
@@ -2521,7 +2591,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
(params.type == QED_FILTER_MAC ||
params.type == QED_FILTER_MAC_VLAN)) {
if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
@@ -2749,7 +2819,7 @@ cleanup:
/* Mark VF for ack and clean pending state */
if (p_vf->state == VF_RESET)
p_vf->state = VF_STOPPED;
- ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ ack_vfs[vfid / 32] |= BIT((vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
@@ -2759,7 +2829,8 @@ cleanup:
return rc;
}
-int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int
+qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 ack_vfs[VF_MAX_STATIC / 32];
int rc = 0;
@@ -2805,7 +2876,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
continue;
vfid = p_vf->abs_vf_id;
- if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
u16 rel_vf_id = p_vf->relative_vf_id;
@@ -2946,7 +3017,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
u64 add_bit = 1ULL << (vfid % 64);
@@ -3064,14 +3135,13 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
/* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
-void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
- u16 pvid, int vfid)
+static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
{
struct qed_vf_info *vf_info;
u64 feature;
@@ -3104,7 +3174,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
return !!p_vf_info->vport_instance;
}
-bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3126,7 +3196,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
return vf_info->spoof_chk;
}
-int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
struct qed_vf_info *vf;
int rc = -EINVAL;
@@ -3163,13 +3233,14 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
if (!p_vf || !p_vf->bulletin.p_virt)
return NULL;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
return NULL;
return p_vf->bulletin.p_virt->mac;
}
-u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+static u16
+qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
struct qed_vf_info *p_vf;
@@ -3177,7 +3248,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
if (!p_vf || !p_vf->bulletin.p_virt)
return 0;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
return 0;
return p_vf->bulletin.p_virt->pvid;
@@ -3201,7 +3272,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+static int
+qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
struct qed_vf_info *vf;
u8 vport_id;
@@ -3760,7 +3832,8 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
-void qed_iov_pf_task(struct work_struct *work)
+static void qed_iov_pf_task(struct work_struct *work)
+
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
iov_task.work);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 9b780b31b15c..abf5bf11f865 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -46,6 +46,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
return p_tlv;
}
+static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+}
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
@@ -103,16 +114,12 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = -EBUSY;
- goto exit;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"PF response: %d [Type %d]\n",
*done, p_req->first_tlv.tl.type);
}
-exit:
- mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
-
return rc;
}
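The refactor above moves the mutex release out of qed_send_msg2pf() and into the new qed_vf_pf_req_end(), so every VF request path, success or failure, unlocks exactly once on its way out. A minimal pthread sketch of the same pattern, assuming hypothetical helpers req_prep()/req_end():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mbx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical model of the new pairing: the prep step takes the
 * mailbox lock, and every request path funnels through req_end(),
 * which logs the outcome and releases the lock exactly once.
 */
static void req_prep(void)  { pthread_mutex_lock(&mbx_lock); }
static void req_end(int rc)
{
	printf("request status = %d\n", rc);
	pthread_mutex_unlock(&mbx_lock);
}

static int do_request(int simulate_err)
{
	int rc = 0;

	req_prep();
	if (simulate_err) {
		rc = -1;
		goto exit;	/* early exits still hit req_end() */
	}
	/* ... send message, read reply ... */
exit:
	req_end(rc);
	return rc;
}

int main(void)
{
	do_request(0);
	do_request(1);
	return 0;
}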
@@ -191,6 +198,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn,
QED_MSG_IOV, "attempting to acquire resources\n");
+ /* Clear response buffer, as this might be a re-send */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
@@ -205,9 +215,12 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* PF agrees to allocate our resources */
if (!(resp->pfdev_info.capabilities &
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
- DP_INFO(p_hwfn,
- "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
- return -EINVAL;
+ /* It's possible a legacy PF mistakenly accepted;
+ * but we don't care - simply mark it as
+ * legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
resources_acquired = true;
@@ -215,27 +228,55 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
attempts < VF_ACQUIRE_THRESH) {
qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
&resp->resc);
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = -EINVAL;
+ goto exit;
+ }
- /* Clear response buffer */
- memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
- } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
- pfdev_info->major_fp_hsi &&
- (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
- DP_NOTICE(p_hwfn,
- "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
- pfdev_info->major_fp_hsi,
- pfdev_info->minor_fp_hsi,
- ETH_HSI_VER_MAJOR,
- ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
- return -EINVAL;
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn,
+ "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
+ rc = -EINVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to see if it supports FW-version override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+ /* If PF/VF are using the same major, the PF must have had
+ * its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
+ rc = -EINVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto exit;
}
}
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
@@ -253,14 +294,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
- if (ETH_HSI_VER_MINOR &&
+ if (!p_iov->b_pre_fp_hsi &&
+ ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
}
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
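The reworked loop above lets the VF renegotiate: when an old PF answers NOT_SUPPORTED with no major version, the VF sets VFPF_ACQUIRE_CAP_PRE_FP_HSI and re-sends instead of failing outright; a second rejection is fatal. A toy model of that retry-with-downgrade loop, with invented pf_acquire()/vf_acquire() stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum pf_status { PF_SUCCESS, PF_NO_RESOURCE, PF_NOT_SUPPORTED };

/* Toy PF that only understands the legacy capability bit. */
static enum pf_status pf_acquire(bool vf_is_pre_fp_hsi)
{
	return vf_is_pre_fp_hsi ? PF_SUCCESS : PF_NOT_SUPPORTED;
}

/* Hypothetical model of the VF acquire loop: on NOT_SUPPORTED from
 * an old PF, set the pre-FP-HSI capability and retry once; a second
 * rejection is fatal.
 */
static int vf_acquire(void)
{
	bool pre_fp_hsi = false;

	for (;;) {
		enum pf_status st = pf_acquire(pre_fp_hsi);

		if (st == PF_SUCCESS)
			return 0;
		if (st == PF_NOT_SUPPORTED && !pre_fp_hsi) {
			pre_fp_hsi = true;	/* downgrade and re-send */
			continue;
		}
		return -1;
	}
}

int main(void)
{
	printf("acquire rc = %d\n", vf_acquire());
	return 0;
}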
@@ -286,31 +331,23 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
/* Allocate vf sriov info */
p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
- if (!p_iov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ if (!p_iov)
return -ENOMEM;
- }
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
&p_iov->vf2pf_request_phys,
GFP_KERNEL);
- if (!p_iov->vf2pf_request) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate `vf2pf_request' DMA memory\n");
+ if (!p_iov->vf2pf_request)
goto free_p_iov;
- }
p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
&p_iov->pf2vf_reply_phys,
GFP_KERNEL);
- if (!p_iov->pf2vf_reply) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate `pf2vf_reply' DMA memory\n");
+ if (!p_iov->pf2vf_reply)
goto free_vf2pf_request;
- }
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -347,6 +384,9 @@ free_p_iov:
return -ENOMEM;
}
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_qid,
@@ -374,6 +414,21 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
+ /* If the PF is legacy, we need to calculate the producers ourselves,
+ * as well as clean them.
+ */
+ if (pp_prod && p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
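For a legacy PF, the VF computes its own Rx producer location from the queue-zone layout rather than trusting a reply offset, as the hunk above shows. A toy model of that address arithmetic; QZONE_START and QZONE_SIZE are illustrative constants, not the device's real BAR0 layout:

#include <stdint.h>
#include <stdio.h>

#define QZONE_START	0x1000	/* illustrative BAR0 offset */
#define QZONE_SIZE	0x80	/* illustrative per-queue stride */

/* Hypothetical model of the legacy producer address computation:
 * with no offset in the PF reply, the VF locates its Rx producer
 * as a fixed stride into the queue-zone region, indexed by the
 * absolute hw queue id.
 */
static uint32_t legacy_prod_offset(unsigned int hw_qid)
{
	return QZONE_START + hw_qid * QZONE_SIZE;
}

int main(void)
{
	printf("qid 3 producer at BAR0 + %#x\n",
	       (unsigned)legacy_prod_offset(3));
	return 0;
}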
@@ -381,13 +436,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
/* Learn the address of the producer from the response */
- if (pp_prod) {
+ if (pp_prod && !p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -399,6 +456,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)&init_prod_val);
}
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -424,10 +483,15 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -470,13 +534,27 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
if (pp_doorbell) {
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+ /* Modern PFs provide the actual offsets, while legacy PFs
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ u32 db_addr;
+
+ db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ db_addr;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
tx_queue_id, *pp_doorbell, resp->offset);
}
exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
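The doorbell hunk above picks the address source by PF generation: modern PFs return a ready-made offset, while for legacy PFs the VF derives one from the connection id. A sketch under invented constants; db_addr_from_cid() is a stand-in for qed_db_addr_vf(), with a made-up shift:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for qed_db_addr_vf(): pack a CID into a
 * per-connection doorbell offset. The shift is illustrative only.
 */
static uint32_t db_addr_from_cid(uint32_t cid)
{
	return cid << 7;
}

static uint32_t txq_doorbell(bool pre_fp_hsi, uint32_t resp_offset,
			     uint32_t cid)
{
	/* Modern PFs hand the VF the ready-made offset; legacy PFs
	 * reported nothing useful, so compute it locally from the CID.
	 */
	return pre_fp_hsi ? db_addr_from_cid(cid) : resp_offset;
}

int main(void)
{
	printf("modern=%#x legacy=%#x\n",
	       (unsigned)txq_doorbell(false, 0x1000, 9),
	       (unsigned)txq_doorbell(true, 0x1000, 9));
	return 0;
}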
@@ -501,10 +579,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -543,10 +626,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -567,10 +655,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -770,13 +863,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
return rc;
}
@@ -797,14 +895,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
p_hwfn->b_int_enabled = 0;
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
@@ -828,6 +931,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
+ qed_vf_pf_req_end(p_hwfn, rc);
+
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
@@ -896,12 +1001,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
@@ -920,12 +1030,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
@@ -1071,8 +1186,8 @@ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
return false;
}
-bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
- u8 *dst_mac, u8 *p_is_forced)
+static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
{
struct qed_bulletin_content *bulletin;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b23ce58e932f..35db7a28aa13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -86,7 +86,7 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
@@ -551,6 +551,11 @@ struct qed_vf_iov {
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case the PF predates the fp-hsi version comparison,
+ * this has to be propagated, as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
};
#ifdef CONFIG_QED_SRIOV
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 74a49850d74d..28dc58919c85 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
+qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 02b06d4e40ae..28c0e9f42c9e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,7 +25,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 1
+#define QEDE_REVISION_VERSION 9
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
@@ -36,6 +36,8 @@
struct qede_stats {
u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
@@ -104,6 +106,13 @@ struct qede_vlan {
bool configured;
};
+struct qede_rdma_dev {
+ struct qedr_dev *qedr_dev;
+ struct list_head entry;
+ struct list_head roce_event_list;
+ struct workqueue_struct *roce_wq;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -124,16 +133,22 @@ struct qede_dev {
(edev)->dev_info.num_tc)
struct qede_fastpath *fp_array;
- u16 req_rss;
- u16 num_rss;
+ u8 req_num_tx;
+ u8 fp_num_tx;
+ u8 req_num_rx;
+ u8 fp_num_rx;
+ u16 req_queues;
+ u16 num_queues;
u8 num_tc;
-#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
-#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
- (edev)->num_tc)
-#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
-#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \
+ (edev)->num_tc)
+#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
+ QEDE_TSS_COUNT(edev))
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx) \
- (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
(edev), (txqidx))])
struct qed_int_info int_info;
@@ -177,6 +192,8 @@ struct qede_dev {
unsigned long sp_flags;
u16 vxlan_dst_port;
u16 geneve_dst_port;
+
+ struct qede_rdma_dev rdma_info;
};
enum QEDE_STATE {
@@ -235,6 +252,7 @@ struct qede_rx_queue {
u16 num_rx_buffers;
u16 rxq_id;
+ u64 rcv_pkts;
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
@@ -263,6 +281,10 @@ struct qede_tx_queue {
union db_prod tx_db;
u16 num_tx_buffers;
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+
+ bool is_legacy;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -277,7 +299,11 @@ struct qede_tx_queue {
struct qede_fastpath {
struct qede_dev *edev;
- u8 rss_id;
+#define QEDE_FASTPATH_TX BIT(0)
+#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+ u8 type;
+ u8 id;
struct napi_struct napi;
struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq;
@@ -337,6 +363,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
-#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f8492cac9290..25a9b293ee8f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -35,6 +35,7 @@ static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rcv_pkts),
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
QEDE_RQSTAT(rx_ip_frags),
@@ -44,6 +45,24 @@ static const struct {
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
qede_rqstats_arr[(sindex)].offset)))
+#define QEDE_TQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_tx_queue, stat_name))
+#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_TQSTAT(stat_name) \
+ {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
+#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_tqstats_arr[] = {
+ QEDE_TQSTAT(xmit_pkts),
+ QEDE_TQSTAT(stopped_cnt),
+};
+
+#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
+ (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\
+ qede_tqstats_arr[(sindex)].offset)))
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
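The QEDE_TQSTAT macros above use the classic offsetof() table trick: record each counter's byte offset once, then read any queue's statistics with one generic loop. A self-contained model of the same technique:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct txq {
	uint64_t xmit_pkts;
	uint64_t stopped_cnt;
};

/* Same offsetof trick as the QEDE_TQSTAT macros: record where each
 * counter lives so one loop can read every stat from any queue.
 */
#define STAT(name) { offsetof(struct txq, name), #name }

static const struct { size_t off; const char *str; } stats[] = {
	STAT(xmit_pkts),
	STAT(stopped_cnt),
};

int main(void)
{
	struct txq q = { .xmit_pkts = 42, .stopped_cnt = 3 };
	size_t i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		printf("%s = %llu\n", stats[i].str,
		       (unsigned long long)*(uint64_t *)((char *)&q +
							 stats[i].off));
	return 0;
}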
@@ -107,6 +126,8 @@ static const struct {
QEDE_PF_STAT(mftag_filter_discards),
QEDE_PF_STAT(mac_filter_discards),
QEDE_STAT(tx_err_drop_pkts),
+ QEDE_STAT(ttl0_discard),
+ QEDE_STAT(packet_too_big_discard),
QEDE_STAT(coalesced_pkts),
QEDE_STAT(coalesced_events),
@@ -151,17 +172,29 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
+ for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ int tc;
+
+ for (j = 0; j < QEDE_NUM_RQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d: %s", i, qede_rqstats_arr[j].string);
+ k += QEDE_NUM_RQSTATS;
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d.%d: %s", i, tc,
+ qede_tqstats_arr[j].string);
+ k += QEDE_NUM_TQSTATS;
+ }
+ }
+
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- strcpy(buf + j * ETH_GSTRING_LEN,
+ strcpy(buf + (k + j) * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
}
-
- for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
- strcpy(buf + j * ETH_GSTRING_LEN,
- qede_rqstats_arr[k].string);
}
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
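The string loop above must emit names in exactly the order the data callback later emits values, or `ethtool -S` output misaligns; the convention is "<queue>: <stat>" for Rx and "<queue>.<tc>: <stat>" for Tx. A toy reproduction of the naming layout, with made-up stat names:

#include <stdio.h>

/* Hypothetical model of the string layout: per-queue Rx stats first,
 * then per-queue/per-TC Tx stats, matching the order in which the
 * stats callback fills the data buffer.
 */
int main(void)
{
	const char *rqstats[] = { "rcv_pkts", "rx_hw_errors" };
	const char *tqstats[] = { "xmit_pkts", "stopped_cnt" };
	int q, tc, j;

	for (q = 0; q < 2; q++) {
		for (j = 0; j < 2; j++)
			printf("%d: %s\n", q, rqstats[j]);
		for (tc = 0; tc < 1; tc++)
			for (j = 0; j < 2; j++)
				printf("%d.%d: %s\n", q, tc, tqstats[j]);
	}
	return 0;
}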
@@ -197,19 +230,30 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
+ for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
+ int tc;
+
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
+ buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
+ }
+
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
+ buf[cnt++] = QEDE_TQSTATS_DATA(edev,
+ sidx,
+ qid, tc);
+ }
+ }
+ }
+
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
}
- for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
- buf[cnt] = 0;
- for (qid = 0; qid < edev->num_rss; qid++)
- buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
- cnt++;
- }
-
mutex_unlock(&edev->qede_lock);
}
@@ -227,7 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
- return num_stats + QEDE_NUM_RQSTATS;
+ return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
+ QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
@@ -249,78 +294,150 @@ static u32 qede_get_priv_flags(struct net_device *dev)
return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
}
-static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+struct qede_link_mode_mapping {
+ u32 qed_link_mode;
+ u32 ethtool_link_mode;
+};
+
+static const struct qede_link_mode_mapping qed_lm_map[] = {
+ {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
+ {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
+ {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
+ {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
+ {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
+ {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+ {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
+ {QED_LM_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+};
+
+#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < QED_LM_COUNT; i++) { \
+ if ((caps) & (qed_lm_map[i].qed_link_mode)) \
+ __set_bit(qed_lm_map[i].ethtool_link_mode,\
+ lk_ksettings->link_modes.name); \
+ } \
+}
+
+#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < QED_LM_COUNT; i++) { \
+ if (test_bit(qed_lm_map[i].ethtool_link_mode, \
+ lk_ksettings->link_modes.name)) \
+ caps |= qed_lm_map[i].qed_link_mode; \
+ } \
+}
+
+static int qede_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
+ struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
- cmd->supported = current_link.supported_caps;
- cmd->advertising = current_link.advertised_caps;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)
+
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)
+
if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
- ethtool_cmd_speed_set(cmd, current_link.speed);
- cmd->duplex = current_link.duplex;
+ base->speed = current_link.speed;
+ base->duplex = current_link.duplex;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- cmd->port = current_link.port;
- cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
- AUTONEG_DISABLE;
- cmd->lp_advertising = current_link.lp_caps;
+ base->port = current_link.port;
+ base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
return 0;
}
-static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int qede_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
+ const struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
- u32 speed;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
- DP_INFO(edev,
- "Link settings are not allowed to be changed\n");
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
-
memset(&current_link, 0, sizeof(current_link));
memset(&params, 0, sizeof(params));
edev->ops->common->get_link(edev->cdev, &current_link);
- speed = ethtool_cmd_speed(cmd);
params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (base->autoneg == AUTONEG_ENABLE) {
params.autoneg = true;
params.forced_speed = 0;
- params.adv_speeds = cmd->advertising;
- } else { /* forced speed */
+ QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
+ } else { /* forced speed */
params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
params.autoneg = false;
- params.forced_speed = speed;
- switch (speed) {
+ params.forced_speed = base->speed;
+ switch (base->speed) {
case SPEED_10000:
if (!(current_link.supported_caps &
- SUPPORTED_10000baseKR_Full)) {
+ QED_LM_10000baseKR_Full_BIT)) {
DP_INFO(edev, "10G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
+ break;
+ case SPEED_25000:
+ if (!(current_link.supported_caps &
+ QED_LM_25000baseKR_Full_BIT)) {
+ DP_INFO(edev, "25G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
break;
case SPEED_40000:
if (!(current_link.supported_caps &
- SUPPORTED_40000baseLR4_Full)) {
+ QED_LM_40000baseLR4_Full_BIT)) {
DP_INFO(edev, "40G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
+ break;
+ case SPEED_50000:
+ if (!(current_link.supported_caps &
+ QED_LM_50000baseKR2_Full_BIT)) {
+ DP_INFO(edev, "50G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
+ break;
+ case SPEED_100000:
+ if (!(current_link.supported_caps &
+ QED_LM_100000baseKR4_Full_BIT)) {
+ DP_INFO(edev, "100G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
break;
default:
- DP_INFO(edev, "Unsupported speed %u\n", speed);
+ DP_INFO(edev, "Unsupported speed %u\n", base->speed);
return -EINVAL;
}
}
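The qed_lm_map table and the two macros above translate capability bits in both directions between the driver's QED_LM_ space and the ethtool link-mode bitmap. A compilable model of the driver-to-ethtool direction, using made-up bit numbers rather than the real QED_LM_ or ETHTOOL_ values:

#include <stdint.h>
#include <stdio.h>

struct lm_map { uint32_t drv_bit; unsigned int ethtool_bit; };

static const struct lm_map map[] = {
	{ 1u << 0, 0 },		/* e.g. FIBRE */
	{ 1u << 1, 6 },		/* e.g. Autoneg */
	{ 1u << 2, 12 },	/* e.g. 10000baseKR_Full */
};

/* Hypothetical model of QEDE_DRV_TO_ETHTOOL_CAPS: walk the table and
 * set the ethtool bit for every driver capability present.
 */
static uint64_t drv_to_ethtool(uint32_t caps)
{
	uint64_t modes = 0;
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (caps & map[i].drv_bit)
			modes |= 1ull << map[i].ethtool_bit;
	return modes;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)drv_to_ethtool(0x5));
	return 0;
}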
@@ -368,8 +485,7 @@ static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
- edev->dp_module;
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}
static void qede_set_msglevel(struct net_device *ndev, u32 level)
@@ -393,8 +509,7 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_params link_params;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
- DP_INFO(edev,
- "Link settings are not allowed to be changed\n");
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -467,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev,
rxc = (u16)coal->rx_coalesce_usecs;
txc = (u16)coal->tx_coalesce_usecs;
- for_each_rss(i) {
+ for_each_queue(i) {
sb_id = edev->fp_array[i].sb_info->igu_sb_id;
rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
(u8)i, sb_id);
@@ -563,7 +678,7 @@ static int qede_set_pauseparam(struct net_device *dev,
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
if (epause->autoneg) {
- if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
+ if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
DP_INFO(edev, "autoneg not supported\n");
return -EINVAL;
}
@@ -580,6 +695,28 @@ static int qede_set_pauseparam(struct net_device *dev,
return 0;
}
+static void qede_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *buffer)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ regs->version = 0;
+ memset(buffer, 0, regs->len);
+
+ if (edev->ops && edev->ops->common)
+ edev->ops->common->dbg_all_data(edev->cdev, buffer);
+}
+
+static int qede_get_regs_len(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->ops && edev->ops->common)
+ return edev->ops->common->dbg_all_data_size(edev->cdev);
+ else
+ return -EINVAL;
+}
+
static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
edev->ndev->mtu = args->mtu;
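The two new hooks above follow the standard ethtool regs contract: the core first calls get_regs_len() to size the buffer, then calls get_regs() to fill it. A userspace sketch of that calling sequence, with trivial stand-in bodies:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical model of the get_regs_len()/get_regs() contract the
 * hunk above implements: the caller sizes the buffer with the first
 * hook, then hands the zeroed buffer to the second to fill.
 */
static int get_regs_len(void) { return 64; }

static void get_regs(void *buf, int len)
{
	memset(buf, 0, len);
	/* ... dbg_all_data() would dump device state here ... */
}

int main(void)
{
	int len = get_regs_len();
	void *buf;

	if (len < 0)
		return 1;
	buf = malloc(len);
	if (!buf)
		return 1;
	get_regs(buf, len);
	printf("dumped %d bytes\n", len);
	free(buf);
	return 0;
}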
@@ -619,45 +756,70 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
- channels->combined_count = QEDE_RSS_CNT(edev);
+ channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
+ edev->fp_num_rx;
+ channels->tx_count = edev->fp_num_tx;
+ channels->rx_count = edev->fp_num_rx;
}
static int qede_set_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
struct qede_dev *edev = netdev_priv(dev);
+ u32 count;
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
channels->rx_count, channels->tx_count,
channels->other_count, channels->combined_count);
- /* We don't support separate rx / tx, nor `other' channels. */
- if (channels->rx_count || channels->tx_count ||
- channels->other_count || (channels->combined_count == 0) ||
- (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
+ count = channels->rx_count + channels->tx_count +
+ channels->combined_count;
+
+ /* We don't support `other' channels */
+ if (channels->other_count) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"command parameters not supported\n");
return -EINVAL;
}
+ if (!(channels->combined_count || (channels->rx_count &&
+ channels->tx_count))) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "need to request at least one transmit and one receive channel\n");
+ return -EINVAL;
+ }
+
+ if (count > QEDE_MAX_RSS_CNT(edev)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "requested channels = %d max supported channels = %d\n",
+ count, QEDE_MAX_RSS_CNT(edev));
+ return -EINVAL;
+ }
+
/* Check if there was a change in the active parameters */
- if (channels->combined_count == QEDE_RSS_CNT(edev)) {
+ if ((count == QEDE_QUEUE_CNT(edev)) &&
+ (channels->tx_count == edev->fp_num_tx) &&
+ (channels->rx_count == edev->fp_num_rx)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"No change in active parameters\n");
return 0;
}
/* We need the number of queues to be divisible between the hwfns */
- if (channels->combined_count % edev->dev_info.common.num_hwfns) {
+ if ((count % edev->dev_info.common.num_hwfns) ||
+ (channels->tx_count % edev->dev_info.common.num_hwfns) ||
+ (channels->rx_count % edev->dev_info.common.num_hwfns)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
- "Number of channels must be divisable by %04x\n",
+ "Number of channels must be divisible by %04x\n",
edev->dev_info.common.num_hwfns);
return -EINVAL;
}
/* Set number of queues and reload if necessary */
- edev->req_rss = channels->combined_count;
+ edev->req_queues = count;
+ edev->req_num_tx = channels->tx_count;
+ edev->req_num_rx = channels->rx_count;
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
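The reworked checks above accept any mix of dedicated Rx, dedicated Tx, and combined channels, as long as at least one path exists in each direction, the total fits the device, and every count divides evenly across hw functions. A sketch of the same validation, with an invented MAX_CHANNELS bound:

#include <stdio.h>

#define MAX_CHANNELS 64

/* Hypothetical model of the qede_set_channels() checks: no "other"
 * channels, at least one Tx and one Rx path overall, a bounded total,
 * and every count divisible by the number of hw functions.
 */
static int channels_ok(unsigned int rx, unsigned int tx,
		       unsigned int other, unsigned int combined,
		       unsigned int num_hwfns)
{
	unsigned int count = rx + tx + combined;

	if (other)
		return 0;
	if (!(combined || (rx && tx)))
		return 0;
	if (count > MAX_CHANNELS)
		return 0;
	if ((count % num_hwfns) || (tx % num_hwfns) || (rx % num_hwfns))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       channels_ok(0, 0, 0, 8, 2),	/* ok: combined only */
	       channels_ok(4, 4, 0, 0, 2),	/* ok: split rx/tx */
	       channels_ok(4, 0, 0, 0, 2));	/* bad: no tx path */
	return 0;
}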
@@ -727,7 +889,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = edev->num_rss;
+ info->data = QEDE_RSS_COUNT(edev);
return 0;
case ETHTOOL_GRXFH:
return qede_get_rss_flags(edev, info);
@@ -930,7 +1092,7 @@ static void qede_netif_start(struct qede_dev *edev)
if (!netif_running(edev->ndev))
return;
- for_each_rss(i) {
+ for_each_queue(i) {
/* Update and reenable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
napi_enable(&edev->fp_array[i].napi);
@@ -942,7 +1104,7 @@ static void qede_netif_stop(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi);
/* Disable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
@@ -952,11 +1114,23 @@ static void qede_netif_stop(struct qede_dev *edev)
static int qede_selftest_transmit_traffic(struct qede_dev *edev,
struct sk_buff *skb)
{
- struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping;
int i, idx, val;
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ txq = edev->fp_array[i].txqs;
+ break;
+ }
+ }
+
+ if (!txq) {
+ DP_NOTICE(edev, "Tx path is not available\n");
+ return -1;
+ }
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
txq->sw_tx_ring[idx].skb = skb;
@@ -1020,14 +1194,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
- struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
u8 *data_ptr;
int i;
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ rxq = edev->fp_array[i].rxq;
+ break;
+ }
+ }
+
+ if (!rxq) {
+ DP_NOTICE(edev, "Rx path is not available\n");
+ return -1;
+ }
+
/* The packet is expected to be received on rx-queue 0 even though RSS
* is enabled. This is because queue 0 is configured as the default
* queue and the loopback traffic is not IP.
@@ -1228,9 +1414,11 @@ static int qede_get_tunable(struct net_device *dev,
}
static const struct ethtool_ops qede_ethtool_ops = {
- .get_settings = qede_get_settings,
- .set_settings = qede_set_settings,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
+ .get_regs_len = qede_get_regs_len,
+ .get_regs = qede_get_regs,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
@@ -1260,7 +1448,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
- .get_settings = qede_get_settings,
+ .get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9544e4c41359..343038ca047d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -36,7 +36,7 @@
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
-
+#include <linux/qed/qede_roce.h>
#include "qede.h"
static char version[] =
@@ -100,7 +100,8 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
-static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -109,6 +110,9 @@ static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
vlan, vf);
@@ -189,8 +193,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
struct ethtool_drvinfo drvinfo;
struct qede_dev *edev;
- /* Currently only support name change */
- if (event != NETDEV_CHANGENAME)
+ if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
goto done;
/* Check whether this is a qede device */
@@ -203,11 +206,18 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
goto done;
edev = netdev_priv(ndev);
- /* Notify qed of the name change */
- if (!edev->ops || !edev->ops->common)
- goto done;
- edev->ops->common->set_id(edev->cdev, edev->ndev->name,
- "qede");
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+ break;
+ case NETDEV_CHANGEADDR:
+ edev = netdev_priv(ndev);
+ qede_roce_event_changeaddr(edev);
+ break;
+ }
done:
return NOTIFY_DONE;
@@ -222,7 +232,7 @@ int __init qede_init(void)
{
int ret;
- pr_notice("qede_init: %s\n", version);
+ pr_info("qede_init: %s\n", version);
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
@@ -253,7 +263,8 @@ int __init qede_init(void)
static void __exit qede_cleanup(void)
{
- pr_notice("qede_cleanup called\n");
+ if (debug & QED_LOG_INFO_MASK)
+ pr_info("qede_cleanup called\n");
unregister_netdevice_notifier(&qede_netdev_notifier);
pci_unregister_driver(&qede_pci_driver);
@@ -270,8 +281,7 @@ module_exit(qede_cleanup);
/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
- struct qede_tx_queue *txq,
- int *len)
+ struct qede_tx_queue *txq, int *len)
{
u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -329,8 +339,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
- int nbd,
- bool data_split)
+ int nbd, bool data_split)
{
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -339,8 +348,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Return prod to its position before this skb was handled */
qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod),
- first_bd);
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
@@ -366,8 +374,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Return again prod to its position before this skb was handled */
qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod),
- first_bd);
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
/* Free skb */
dev_kfree_skb_any(skb);
@@ -376,8 +383,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
}
static u32 qede_xmit_type(struct qede_dev *edev,
- struct sk_buff *skb,
- int *ipv6_ext)
+ struct sk_buff *skb, int *ipv6_ext)
{
u32 rc = XMIT_L4_CSUM;
__be16 l3_proto;
@@ -434,15 +440,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
}
static int map_frag_to_bd(struct qede_dev *edev,
- skb_frag_t *frag,
- struct eth_tx_bd *bd)
+ skb_frag_t *frag, struct eth_tx_bd *bd)
{
dma_addr_t mapping;
/* Map skb non-linear frag data for DMA */
mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
return -ENOMEM;
@@ -504,9 +508,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
}
/* Main transmit function */
-static
-netdev_tx_t qede_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
struct netdev_queue *netdev_txq;
@@ -526,12 +529,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
txq = QEDE_TX_QUEUE(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
- WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
- (MAX_SKB_FRAGS + 1));
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
@@ -606,6 +608,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
}
+ /* Legacy FW had flipped behavior in regard to this bit -
+ * i.e., the bit needed to be set to prevent the FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ first_bd->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
* support parsing IPv6 with extension header/s.
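The XOR in the hunk above is the whole legacy workaround: the tunnelling flag is first computed for modern FW, then flipped once more for legacy queues whose FW read it with inverted polarity. A toy model; the shift value is illustrative, not the hardware's:

#include <stdint.h>
#include <stdio.h>

#define TUNN_FLAG_SHIFT 5	/* illustrative shift, not the HW value */

/* Hypothetical model: compute the bit for modern FW, then flip it
 * once more for legacy queues, whose FW read it with the opposite
 * meaning. XOR keeps both cases in one line.
 */
static uint16_t tx_bitfields(int pkt_is_tunneled, int txq_is_legacy)
{
	uint16_t bf = 0;

	if (pkt_is_tunneled)
		bf |= 1 << TUNN_FLAG_SHIFT;
	if (txq_is_legacy)
		bf ^= 1 << TUNN_FLAG_SHIFT;
	return bf;
}

int main(void)
{
	printf("modern/tunn=%#x legacy/tunn=%#x legacy/plain=%#x\n",
	       tx_bitfields(1, 0), tx_bitfields(1, 1), tx_bitfields(0, 1));
	return 0;
}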
@@ -731,6 +741,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq);
+ txq->stopped_cnt++;
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
/* paired memory barrier is in qede_tx_int(), we have to keep
@@ -764,8 +775,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
-static int qede_tx_int(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
@@ -791,6 +801,7 @@ static int qede_tx_int(struct qede_dev *edev,
bytes_compl += len;
pkts_compl++;
txq->sw_tx_cons++;
+ txq->xmit_pkts++;
}
netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
@@ -963,8 +974,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev,
static u32 qede_get_rxhash(struct qede_dev *edev,
u8 bitfields,
- __le32 rss_hash,
- enum pkt_hash_types *rxhash_type)
+ __le32 rss_hash, enum pkt_hash_types *rxhash_type)
{
enum rss_hash_type htype;
@@ -993,12 +1003,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
static inline void qede_skb_receive(struct qede_dev *edev,
struct qede_fastpath *fp,
- struct sk_buff *skb,
- u16 vlan_tag)
+ struct sk_buff *skb, u16 vlan_tag)
{
if (vlan_tag)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
}
@@ -1021,8 +1029,7 @@ static void qede_set_gro_params(struct qede_dev *edev,
static int qede_fill_frag_skb(struct qede_dev *edev,
struct qede_rx_queue *rxq,
- u8 tpa_agg_index,
- u16 len_on_bd)
+ u8 tpa_agg_index, u16 len_on_bd)
{
struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
NUM_RX_BDS_MAX];
@@ -1209,7 +1216,7 @@ static void qede_gro_receive(struct qede_dev *edev,
#endif
send_skb:
- skb_record_rx_queue(skb, fp->rss_id);
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, vlan_tag);
}
@@ -1413,7 +1420,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
edev->ops->eth_cqe_completion(
- edev->cdev, fp->rss_id,
+ edev->cdev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
goto next_cqe;
}
@@ -1470,7 +1477,7 @@ alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
- "Build_skb failed, dropping incoming packet\n");
+ "skb allocation failed, dropping incoming packet\n");
qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
goto next_cqe;
@@ -1578,14 +1585,13 @@ alloc_skb:
skb->protocol = eth_type_trans(skb, edev->ndev);
rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
- fp_cqe->rss_hash,
- &rxhash_type);
+ fp_cqe->rss_hash, &rxhash_type);
skb_set_hash(skb, rx_hash, rxhash_type);
qede_set_skb_csum(skb, csum_flag);
- skb_record_rx_queue(skb, fp->rss_id);
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
next_rx_only:
@@ -1604,6 +1610,8 @@ next_cqe: /* don't consume bd rx buffer */
/* Update producers */
qede_update_rx_prod(edev, rxq);
+ rxq->rcv_pkts += rx_pkt;
+
return rx_pkt;
}
@@ -1616,10 +1624,12 @@ static int qede_poll(struct napi_struct *napi, int budget)
u8 tc;
for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
+ if (likely(fp->type & QEDE_FASTPATH_TX) &&
+ qede_txq_has_work(&fp->txqs[tc]))
qede_tx_int(edev, &fp->txqs[tc]);
- rx_work_done = qede_has_rx_work(fp->rxq) ?
+ rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
qed_sb_update_sb_idx(fp->sb_info);
@@ -1639,8 +1649,10 @@ static int qede_poll(struct napi_struct *napi, int budget)
rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
+ if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ||
+ (likely(fp->type & QEDE_FASTPATH_TX) &&
+ qede_has_tx_work(fp)))) {
napi_complete(napi);
/* Update and reenable interrupts */
@@ -1711,6 +1723,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
+ edev->stats.ttl0_discard = stats.ttl0_discard;
edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
@@ -1790,9 +1804,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}
-static struct rtnl_link_stats64 *qede_get_stats64(
- struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static
+struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -2106,14 +2120,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
}
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
- "marked vlan %d as non-configured\n",
- vlan->vid);
+ "marked vlan %d as non-configured\n", vlan->vid);
}
edev->accept_any_vlan = false;
}
-int qede_set_features(struct net_device *dev, netdev_features_t features)
+static int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
@@ -2149,7 +2162,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2160,7 +2173,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
edev->geneve_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
@@ -2184,7 +2197,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2195,7 +2208,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
edev->geneve_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
@@ -2240,15 +2253,13 @@ static const struct net_device_ops qede_netdev_ops = {
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
struct pci_dev *pdev,
struct qed_dev_eth_info *info,
- u32 dp_module,
- u8 dp_level)
+ u32 dp_module, u8 dp_level)
{
struct net_device *ndev;
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues,
- info->num_queues);
+ info->num_queues, info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
@@ -2264,6 +2275,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+ info->num_queues, info->num_queues);
+
SET_NETDEV_DEV(ndev, &pdev->dev);
memset(&edev->stats, 0, sizeof(edev->stats));
@@ -2352,7 +2366,7 @@ static void qede_free_fp_array(struct qede_dev *edev)
struct qede_fastpath *fp;
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
fp = &edev->fp_array[i];
kfree(fp->sb_info);
@@ -2361,22 +2375,33 @@ static void qede_free_fp_array(struct qede_dev *edev)
}
kfree(edev->fp_array);
}
- edev->num_rss = 0;
+
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
}
static int qede_alloc_fp_array(struct qede_dev *edev)
{
+ u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
int i;
- edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
sizeof(*edev->fp_array), GFP_KERNEL);
if (!edev->fp_array) {
DP_NOTICE(edev, "fp array allocation failed\n");
goto err;
}
- for_each_rss(i) {
+ fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+
+ /* Allocate the FP elements for Rx queues followed by combined and then
+ * the Tx. This ordering should be maintained so that the respective
+ * queues (Rx or Tx) will be together in the fastpath array and the
+ * associated ids will be sequential.
+ */
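+ /* Worked example (derived from the logic below): with 8 queues in
+ * total, fp_num_rx = 2 and fp_num_tx = 2, fp_combined is 4, so
+ * indices 0-1 become RX-only, 2-5 combined and 6-7 TX-only.
+ */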
+ for_each_queue(i) {
fp = &edev->fp_array[i];
fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
@@ -2385,16 +2410,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
- fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
- if (!fp->rxq) {
- DP_NOTICE(edev, "RXQ struct allocation failed\n");
- goto err;
+ if (fp_rx) {
+ fp->type = QEDE_FASTPATH_RX;
+ fp_rx--;
+ } else if (fp_combined) {
+ fp->type = QEDE_FASTPATH_COMBINED;
+ fp_combined--;
+ } else {
+ fp->type = QEDE_FASTPATH_TX;
}
- fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
- if (!fp->txqs) {
- DP_NOTICE(edev, "TXQ array allocation failed\n");
- goto err;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
+ GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev,
+ "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev,
+ "RXQ struct allocation failed\n");
+ goto err;
+ }
}
}
@@ -2456,7 +2498,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
bool is_vf, enum qede_probe_mode mode)
{
struct qed_probe_params probe_params;
- struct qed_slowpath_params params;
+ struct qed_slowpath_params sp_params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
struct qed_dev *cdev;
@@ -2479,14 +2521,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_update_pf_params(cdev);
/* Start the Slowpath-process */
- memset(&params, 0, sizeof(struct qed_slowpath_params));
- params.int_mode = QED_INT_MODE_MSIX;
- params.drv_major = QEDE_MAJOR_VERSION;
- params.drv_minor = QEDE_MINOR_VERSION;
- params.drv_rev = QEDE_REVISION_VERSION;
- params.drv_eng = QEDE_ENGINEERING_VERSION;
- strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
- rc = qed_ops->common->slowpath_start(cdev, &params);
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDE_MAJOR_VERSION;
+ sp_params.drv_minor = QEDE_MINOR_VERSION;
+ sp_params.drv_rev = QEDE_REVISION_VERSION;
+ sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
goto err1;
@@ -2509,10 +2551,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
+ rc = qede_roce_dev_add(edev);
+ if (rc)
+ goto err3;
+
rc = register_netdev(edev->ndev);
if (rc) {
DP_NOTICE(edev, "Cannot register net-device\n");
- goto err3;
+ goto err4;
}
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
@@ -2532,6 +2578,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
+err4:
+ qede_roce_dev_remove(edev);
err3:
free_netdev(edev->ndev);
err2:
@@ -2578,8 +2626,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
cancel_delayed_work_sync(&edev->sp_task);
+
unregister_netdev(ndev);
+ qede_roce_dev_remove(edev);
+
edev->ops->common->set_power_state(cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
@@ -2590,7 +2641,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
qed_ops->common->remove(cdev);
- pr_notice("Ending successfully qede_remove\n");
+ dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
static void qede_remove(struct pci_dev *pdev)
@@ -2609,8 +2660,8 @@ static int qede_set_num_queues(struct qede_dev *edev)
u16 rss_num;
/* Setup queues according to possible resources*/
- if (edev->req_rss)
- rss_num = edev->req_rss;
+ if (edev->req_queues)
+ rss_num = edev->req_queues;
else
rss_num = netif_get_num_default_rss_queues() *
edev->dev_info.common.num_hwfns;
@@ -2620,11 +2671,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
if (rc > 0) {
/* Managed to request interrupts for our queues */
- edev->num_rss = rc;
+ edev->num_queues = rc;
DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
- QEDE_RSS_CNT(edev), rss_num);
+ QEDE_QUEUE_CNT(edev), rss_num);
rc = 0;
}
+
+ edev->fp_num_tx = edev->req_num_tx;
+ edev->fp_num_rx = edev->req_num_rx;
+
return rc;
}
@@ -2638,16 +2693,14 @@ static void qede_free_mem_sb(struct qede_dev *edev,
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
sb_virt = dma_alloc_coherent(&edev->pdev->dev,
- sizeof(*sb_virt),
- &sb_phys, GFP_KERNEL);
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
@@ -2679,16 +2732,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
data = rx_buf->data;
dma_unmap_page(&edev->pdev->dev,
- rx_buf->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
rx_buf->data = NULL;
__free_page(data);
}
}
-static void qede_free_sge_mem(struct qede_dev *edev,
- struct qede_rx_queue *rxq) {
+static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
int i;
if (edev->gro_disable)
@@ -2707,8 +2759,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
}
}
-static void qede_free_mem_rxq(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
qede_free_sge_mem(edev, rxq);
@@ -2730,9 +2781,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
struct page *data;
- u16 rx_buf_size;
-
- rx_buf_size = rxq->rx_buf_size;
data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data)) {
@@ -2767,8 +2815,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static int qede_alloc_sge_mem(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
dma_addr_t mapping;
int i;
@@ -2815,15 +2862,14 @@ err:
}
/* This function allocates all memory needed per Rx queue */
-static int qede_alloc_mem_rxq(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
- rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
- edev->ndev->mtu;
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+
if (rxq->rx_buf_size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE;
@@ -2877,8 +2923,7 @@ err:
return rc;
}
-static void qede_free_mem_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
kfree(txq->sw_tx_ring);
@@ -2888,8 +2933,7 @@ static void qede_free_mem_txq(struct qede_dev *edev,
}
/* This function allocates all memory needed per Tx queue */
-static int qede_alloc_mem_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
int size, rc;
union eth_tx_bd_types *p_virt;
@@ -2921,41 +2965,45 @@ err:
}
/* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *edev,
- struct qede_fastpath *fp)
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
int tc;
qede_free_mem_sb(edev, fp->sb_info);
- qede_free_mem_rxq(edev, fp->rxq);
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_free_mem_rxq(edev, fp->rxq);
- for (tc = 0; tc < edev->num_tc; tc++)
- qede_free_mem_txq(edev, &fp->txqs[tc]);
+ if (fp->type & QEDE_FASTPATH_TX)
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
- * which contains status block, one rx queue and multiple per-TC tx queues.
+ * which contains status block, one rx queue and/or multiple per-TC tx queues.
*/
-static int qede_alloc_mem_fp(struct qede_dev *edev,
- struct qede_fastpath *fp)
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
int rc, tc;
- rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
- if (rc)
- goto err;
-
- rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc)
goto err;
- for (tc = 0; tc < edev->num_tc; tc++) {
- rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
if (rc)
goto err;
}
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+ }
+
return 0;
err:
return rc;
@@ -2965,7 +3013,7 @@ static void qede_free_mem_load(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
qede_free_mem_fp(edev, fp);
@@ -2975,16 +3023,16 @@ static void qede_free_mem_load(struct qede_dev *edev)
/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
- int rc = 0, rss_id;
+ int rc = 0, queue_id;
- for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
- struct qede_fastpath *fp = &edev->fp_array[rss_id];
+ for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[queue_id];
rc = qede_alloc_mem_fp(edev, fp);
if (rc) {
DP_ERR(edev,
"Failed to allocate memory for fastpath - rss id = %d\n",
- rss_id);
+ queue_id);
qede_free_mem_load(edev);
return rc;
}
@@ -2996,30 +3044,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
- int rss_id, txq_index, tc;
+ int queue_id, rxq_index = 0, txq_index = 0, tc;
struct qede_fastpath *fp;
- for_each_rss(rss_id) {
- fp = &edev->fp_array[rss_id];
+ for_each_queue(queue_id) {
+ fp = &edev->fp_array[queue_id];
fp->edev = edev;
- fp->rss_id = rss_id;
+ fp->id = queue_id;
memset((void *)&fp->napi, 0, sizeof(fp->napi));
memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
- memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
- fp->rxq->rxq_id = rss_id;
+ if (fp->type & QEDE_FASTPATH_RX) {
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rxq_index++;
+ }
- memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
- for (tc = 0; tc < edev->num_tc; tc++) {
- txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
- fp->txqs[tc].index = txq_index;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ memset((void *)fp->txqs, 0,
+ (edev->num_tc * sizeof(*fp->txqs)));
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ fp->txqs[tc].index = txq_index +
+ tc * QEDE_TSS_COUNT(edev);
+ if (edev->dev_info.is_legacy)
+ fp->txqs[tc].is_legacy = true;
+ }
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
- edev->ndev->name, rss_id);
+ edev->ndev->name, queue_id);
}
edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
@@ -3029,12 +3085,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
}
- rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
return rc;
@@ -3047,7 +3104,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi);
netif_napi_del(&edev->fp_array[i].napi);
@@ -3059,7 +3116,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
int i;
/* Add NAPI objects */
- for_each_rss(i) {
+ for_each_queue(i) {
netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
qede_poll, NAPI_POLL_WEIGHT);
napi_enable(&edev->fp_array[i].napi);
@@ -3088,14 +3145,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
int i, rc;
/* Sanitize number of interrupts == number of prepared RSS queues */
- if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
DP_ERR(edev,
"Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
- QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
return -EINVAL;
}
- for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
@@ -3140,18 +3197,17 @@ static int qede_setup_irqs(struct qede_dev *edev)
/* qed should learn to receive the RSS ids and callbacks */
ops = edev->ops->common;
- for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
ops->simd_handler_config(edev->cdev,
&edev->fp_array[i], i,
qede_simd_fp_handler);
- edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
}
return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq,
- bool allow_drain)
+ struct qede_tx_queue *txq, bool allow_drain)
{
int rc, cnt = 1000;
@@ -3203,45 +3259,53 @@ static int qede_stop_queues(struct qede_dev *edev)
}
/* Flush Tx queues. If needed, request drain from MCP */
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
- rc = qede_drain_txq(edev, txq, true);
- if (rc)
- return rc;
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
}
}
- /* Stop all Queues in reverse order*/
- for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ /* Stop all Queues in reverse order */
+ for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
struct qed_stop_rxq_params rx_params;
- /* Stop the Tx Queue(s)*/
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
-
- tx_params.rss_id = i;
- tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
- rc = edev->ops->q_tx_stop(cdev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
- return rc;
+ /* Stop the Tx Queue(s) */
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+ u8 val;
+
+ tx_params.rss_id = i;
+ val = edev->fp_array[i].txqs[tc].index;
+ tx_params.tx_queue_id = val;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
}
}
- /* Stop the Rx Queue*/
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = i;
+ /* Stop the Rx Queue */
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
- rc = edev->ops->q_rx_stop(cdev, &rx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
}
}
@@ -3264,7 +3328,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
- if (!edev->num_rss) {
+ if (!edev->num_queues) {
DP_ERR(edev,
"Cannot update V-VPORT as active as there are no Rx queues\n");
return -EINVAL;
@@ -3288,50 +3352,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
- dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
-
- memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
- q_params.queue_id = i;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = RX_PI;
-
- rc = edev->ops->q_rx_start(cdev, &q_params,
- fp->rxq->rx_buf_size,
- fp->rxq->rx_bd_ring.p_phys_addr,
- phys_table,
- fp->rxq->rx_comp_ring.page_cnt,
- &fp->rxq->hw_rxq_prod_addr);
- if (rc) {
- DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
- return rc;
- }
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
- fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ struct qede_rx_queue *rxq = fp->rxq;
+ __le16 *val;
- qede_update_rx_prod(edev, fp->rxq);
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = rxq->rxq_id;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ p_phys_table =
+ qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
+ page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ rxq->rx_buf_size,
+ rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table,
+ page_cnt,
+ &rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
+ rc);
+ return rc;
+ }
+
+ val = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ rxq->hw_cons_ptr = val;
+
+ qede_update_rx_prod(edev, rxq);
+ }
+
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
for (tc = 0; tc < edev->num_tc; tc++) {
struct qede_tx_queue *txq = &fp->txqs[tc];
- int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params));
q_params.rss_id = i;
- q_params.queue_id = txq_index;
+ q_params.queue_id = txq->index;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
q_params.sb_idx = TX_PI(tc);
rc = edev->ops->q_tx_start(cdev, &q_params,
- txq->tx_pbl.pbl.p_phys_table,
- txq->tx_pbl.page_cnt,
+ p_phys_table, page_cnt,
&txq->doorbell_addr);
if (rc) {
DP_ERR(edev, "Start TXQ #%d failed %d\n",
- txq_index, rc);
+ txq->index, rc);
return rc;
}
@@ -3362,13 +3442,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
}
/* Fill struct with RSS params */
- if (QEDE_RSS_CNT(edev) > 1) {
+ if (QEDE_RSS_COUNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
/* Need to validate current RSS config uses valid entries */
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
if (edev->rss_params.rss_ind_table[i] >=
- edev->num_rss) {
+ QEDE_RSS_COUNT(edev)) {
reset_rss_indir = true;
break;
}
@@ -3381,7 +3461,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
u16 indir_val;
- val = QEDE_RSS_CNT(edev);
+ val = QEDE_RSS_COUNT(edev);
indir_val = ethtool_rxfh_indir_default(i, val);
edev->rss_params.rss_ind_table[i] = indir_val;
}
@@ -3447,6 +3527,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
DP_INFO(edev, "Starting qede unload\n");
+ qede_roce_dev_event_close(edev);
mutex_lock(&edev->qede_lock);
edev->state = QEDE_STATE_CLOSED;
@@ -3510,7 +3591,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
if (rc)
goto err1;
DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_RSS_CNT(edev), edev->num_tc);
+ QEDE_QUEUE_CNT(edev), edev->num_tc);
rc = qede_set_real_num_queues(edev);
if (rc)
@@ -3547,6 +3628,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
/* Query whether link is already-up */
memset(&link_output, 0, sizeof(link_output));
edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_roce_dev_event_open(edev);
qede_link_update(edev, &link_output);
DP_INFO(edev, "Ending qede load successfully\n");
@@ -3563,7 +3645,9 @@ err2:
err1:
edev->ops->common->set_fp_int(edev->cdev, 0);
qede_free_fp_array(edev);
- edev->num_rss = 0;
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
err0:
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
new file mode 100644
index 000000000000..9867f960b063
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -0,0 +1,314 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_roce.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_roce_supported(struct qede_dev *dev)
+{
+ return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_roce_dev_add(struct qede_dev *edev)
+{
+ if (!qedr_drv)
+ return;
+
+ edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+ edev->ndev);
+}
+
+static int qede_roce_create_wq(struct qede_dev *edev)
+{
+ INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
+ edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
+ if (!edev->rdma_info.roce_wq) {
+ DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qede_roce_cleanup_event(struct qede_dev *edev)
+{
+ struct list_head *head = &edev->rdma_info.roce_event_list;
+ struct qede_roce_event_work *event_node;
+
+ flush_workqueue(edev->rdma_info.roce_wq);
+ while (!list_empty(head)) {
+ event_node = list_entry(head->next, struct qede_roce_event_work,
+ list);
+ cancel_work_sync(&event_node->work);
+ list_del(&event_node->list);
+ kfree(event_node);
+ }
+}
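+
+/* Note on the teardown above: flush_workqueue() lets any in-flight handlers
+ * finish first; cancel_work_sync() then covers work that is still queued,
+ * after which the event nodes can be freed safely.
+ */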
+
+static void qede_roce_destroy_wq(struct qede_dev *edev)
+{
+ qede_roce_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.roce_wq);
+}
+
+int qede_roce_dev_add(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ if (qede_roce_supported(edev)) {
+ rc = qede_roce_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_roce_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+
+ return rc;
+}
+
+static void _qede_roce_dev_remove(struct qede_dev *edev)
+{
+ if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+ qedr_drv->remove(edev->rdma_info.qedr_dev);
+ edev->rdma_info.qedr_dev = NULL;
+}
+
+void qede_roce_dev_remove(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ qede_roce_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_remove(edev);
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_open(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_roce_dev_open(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_open(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_close(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_roce_dev_close(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_close(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_roce_dev_shutdown(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+int qede_roce_register_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+ u8 qedr_counter = 0;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv) {
+ mutex_unlock(&qedr_dev_list_lock);
+ return -EINVAL;
+ }
+ qedr_drv = drv;
+
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ struct net_device *ndev;
+
+ qedr_counter++;
+ _qede_roce_dev_add(edev);
+ ndev = edev->ndev;
+ if (netif_running(ndev) && netif_oper_up(ndev))
+ _qede_roce_dev_open(edev);
+ }
+ mutex_unlock(&qedr_dev_list_lock);
+
+ /* edev is not a valid device pointer once the loop above completes */
+ pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+ qedr_counter);
+
+ return 0;
+}
+EXPORT_SYMBOL(qede_roce_register_driver);
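+
+/* Usage sketch (illustrative only; the callback names are hypothetical):
+ * a RoCE driver such as qedr supplies a struct qedr_driver - declared in
+ * include/linux/qed/qede_roce.h - with its add/remove/notify hooks and
+ * registers it once at module init:
+ *
+ *	static struct qedr_driver my_qedr_drv = {
+ *		.add	= my_add,
+ *		.remove	= my_remove,
+ *		.notify	= my_notify,
+ *	};
+ *
+ *	rc = qede_roce_register_driver(&my_qedr_drv);
+ *
+ * Registration walks qedr_dev_list and calls ->add() (which returns the
+ * qedr_dev handle) for every qede device probed before the RoCE driver
+ * loaded, then notifies QEDE_UP for devices whose netdev is already up.
+ */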
+
+void qede_roce_unregister_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+
+ mutex_lock(&qedr_dev_list_lock);
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ if (edev->rdma_info.qedr_dev)
+ _qede_roce_dev_remove(edev);
+ }
+ qedr_drv = NULL;
+ mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_roce_unregister_driver);
+
+static void qede_roce_changeaddr(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+static struct qede_roce_event_work *
+qede_roce_get_free_event_node(struct qede_dev *edev)
+{
+ struct qede_roce_event_work *event_node = NULL;
+ struct list_head *list_node = NULL;
+ bool found = false;
+
+ list_for_each(list_node, &edev->rdma_info.roce_event_list) {
+ event_node = list_entry(list_node, struct qede_roce_event_work,
+ list);
+ if (!work_pending(&event_node->work)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+ if (!event_node) {
+ DP_NOTICE(edev,
+ "qedr: Could not allocate memory for roce work\n");
+ return NULL;
+ }
+ list_add_tail(&event_node->list,
+ &edev->rdma_info.roce_event_list);
+ }
+
+ return event_node;
+}
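+
+/* The roce_event_list above doubles as a free pool: finished work items stay
+ * on the list and are reused for later events, so steady-state notification
+ * does not allocate memory.
+ */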
+
+static void qede_roce_handle_event(struct work_struct *work)
+{
+ struct qede_roce_event_work *event_node;
+ enum qede_roce_event event;
+ struct qede_dev *edev;
+
+ event_node = container_of(work, struct qede_roce_event_work, work);
+ event = event_node->event;
+ edev = event_node->ptr;
+
+ switch (event) {
+ case QEDE_UP:
+ qede_roce_dev_open(edev);
+ break;
+ case QEDE_DOWN:
+ qede_roce_dev_close(edev);
+ break;
+ case QEDE_CLOSE:
+ qede_roce_dev_shutdown(edev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qede_roce_changeaddr(edev);
+ break;
+ default:
+ DP_NOTICE(edev, "Invalid roce event %d\n", event);
+ }
+}
+
+static void qede_roce_add_event(struct qede_dev *edev,
+ enum qede_roce_event event)
+{
+ struct qede_roce_event_work *event_node;
+
+ if (!edev->rdma_info.qedr_dev)
+ return;
+
+ event_node = qede_roce_get_free_event_node(edev);
+ if (!event_node)
+ return;
+
+ event_node->event = event;
+ event_node->ptr = edev;
+
+ INIT_WORK(&event_node->work, qede_roce_handle_event);
+ queue_work(edev->rdma_info.roce_wq, &event_node->work);
+}
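+
+/* The wrappers below are the entry points the qede core uses: load/unload
+ * queue QEDE_UP/QEDE_DOWN (see the qede_roce_dev_event_open/close calls in
+ * the qede patch above), and address changes queue QEDE_CHANGE_ADDR.
+ * Delivery is deferred to roce_wq, so callers never invoke ->notify()
+ * directly.
+ */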
+
+void qede_roce_dev_event_open(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_UP);
+}
+
+void qede_roce_dev_event_close(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_DOWN);
+}
+
+void qede_roce_event_changeaddr(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 3ebef27e0964..3ae3968b0edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -432,18 +432,19 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
struct net_device *netdev,
- struct net_device *filter_dev, int idx)
+ struct net_device *filter_dev, int *idx)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
qlcnic_sriov_check(adapter))
- idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+ err = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
- return idx;
+ return err;
}
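
/* ndo_fdb_dump now returns an error code and reports progress through *idx
 * (rtnetlink fdb-dump rework in this same cycle), hence the signature change.
 */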
static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 24061b9b92e8..5f327659efa7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -238,7 +238,7 @@ int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
-int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
#else
static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index afd687e5e779..50eaafa3eaba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1915,7 +1915,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
}
int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
- u16 vlan, u8 qos)
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1928,6 +1928,9 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
if (vf >= sriov->num_vfs || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan > MAX_VLAN_ID) {
netdev_err(netdev,
"Invalid VLAN ID, allowed range is [0 - %d]\n",
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index a76e380cf89a..9ba568db576f 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -24,4 +24,16 @@ config QCA7000
To compile this driver as a module, choose M here. The module
will be called qcaspi.
+config QCOM_EMAC
+ tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
+ select CRC32
+ select PHYLIB
+ ---help---
+ This driver supports the Qualcomm Technologies, Inc. Gigabit
+ Ethernet Media Access Controller (EMAC). The controller
+ supports IEEE 802.3-2002, half-duplex mode at 10/100 Mb/s,
+ full-duplex mode at 10/100/1000 Mb/s, Wake On LAN (WOL) for
+ low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+ Precision Clock Synchronization Protocol.
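+
+ To compile this driver as a module, choose M here. The module
+ will be called qcom-emac.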
+
endif # NET_VENDOR_QUALCOMM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 9da2d75db700..aacb0a585c68 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_QCA7000) += qcaspi.o
qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o
+
+obj-y += emac/
diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile
new file mode 100644
index 000000000000..01ee144c6386
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Qualcomm Technologies, Inc. EMAC Gigabit Ethernet driver
+#
+
+obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
+
+qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
new file mode 100644
index 000000000000..e97968ed4b8f
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -0,0 +1,1528 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC Ethernet Controller MAC layer support
+ */
+
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <net/ip6_checksum.h>
+#include "emac.h"
+#include "emac-sgmii.h"
+
+/* EMAC base register offsets */
+#define EMAC_MAC_CTRL 0x001480
+#define EMAC_WOL_CTRL0 0x0014a0
+#define EMAC_RSS_KEY0 0x0014b0
+#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0
+#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4
+#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8
+#define EMAC_INTER_SRAM_PART9 0x001534
+#define EMAC_DESC_CTRL_0 0x001540
+#define EMAC_DESC_CTRL_1 0x001544
+#define EMAC_DESC_CTRL_2 0x001550
+#define EMAC_DESC_CTRL_10 0x001554
+#define EMAC_DESC_CTRL_12 0x001558
+#define EMAC_DESC_CTRL_13 0x00155c
+#define EMAC_DESC_CTRL_3 0x001560
+#define EMAC_DESC_CTRL_4 0x001564
+#define EMAC_DESC_CTRL_5 0x001568
+#define EMAC_DESC_CTRL_14 0x00156c
+#define EMAC_DESC_CTRL_15 0x001570
+#define EMAC_DESC_CTRL_16 0x001574
+#define EMAC_DESC_CTRL_6 0x001578
+#define EMAC_DESC_CTRL_8 0x001580
+#define EMAC_DESC_CTRL_9 0x001584
+#define EMAC_DESC_CTRL_11 0x001588
+#define EMAC_TXQ_CTRL_0 0x001590
+#define EMAC_TXQ_CTRL_1 0x001594
+#define EMAC_TXQ_CTRL_2 0x001598
+#define EMAC_RXQ_CTRL_0 0x0015a0
+#define EMAC_RXQ_CTRL_1 0x0015a4
+#define EMAC_RXQ_CTRL_2 0x0015a8
+#define EMAC_RXQ_CTRL_3 0x0015ac
+#define EMAC_BASE_CPU_NUMBER 0x0015b8
+#define EMAC_DMA_CTRL 0x0015c0
+#define EMAC_MAILBOX_0 0x0015e0
+#define EMAC_MAILBOX_5 0x0015e4
+#define EMAC_MAILBOX_6 0x0015e8
+#define EMAC_MAILBOX_13 0x0015ec
+#define EMAC_MAILBOX_2 0x0015f4
+#define EMAC_MAILBOX_3 0x0015f8
+#define EMAC_MAILBOX_11 0x00160c
+#define EMAC_AXI_MAST_CTRL 0x001610
+#define EMAC_MAILBOX_12 0x001614
+#define EMAC_MAILBOX_9 0x001618
+#define EMAC_MAILBOX_10 0x00161c
+#define EMAC_ATHR_HEADER_CTRL 0x001620
+#define EMAC_CLK_GATE_CTRL 0x001814
+#define EMAC_MISC_CTRL 0x001990
+#define EMAC_MAILBOX_7 0x0019e0
+#define EMAC_MAILBOX_8 0x0019e4
+#define EMAC_MAILBOX_15 0x001bd4
+#define EMAC_MAILBOX_16 0x001bd8
+
+/* EMAC_MAC_CTRL */
+#define SINGLE_PAUSE_MODE 0x10000000
+#define DEBUG_MODE 0x08000000
+#define BROAD_EN 0x04000000
+#define MULTI_ALL 0x02000000
+#define RX_CHKSUM_EN 0x01000000
+#define HUGE 0x00800000
+#define SPEED(x) (((x) & 0x3) << 20)
+#define SPEED_MASK SPEED(0x3)
+#define SIMR 0x00080000
+#define TPAUSE 0x00010000
+#define PROM_MODE 0x00008000
+#define VLAN_STRIP 0x00004000
+#define PRLEN_BMSK 0x00003c00
+#define PRLEN_SHFT 10
+#define HUGEN 0x00000200
+#define FLCHK 0x00000100
+#define PCRCE 0x00000080
+#define CRCE 0x00000040
+#define FULLD 0x00000020
+#define MAC_LP_EN 0x00000010
+#define RXFC 0x00000008
+#define TXFC 0x00000004
+#define RXEN 0x00000002
+#define TXEN 0x00000001
+
+/* EMAC_WOL_CTRL0 */
+#define LK_CHG_PME 0x20
+#define LK_CHG_EN 0x10
+#define MG_FRAME_PME 0x8
+#define MG_FRAME_EN 0x4
+#define WK_FRAME_EN 0x1
+
+/* EMAC_DESC_CTRL_3 */
+#define RFD_RING_SIZE_BMSK 0xfff
+
+/* EMAC_DESC_CTRL_4 */
+#define RX_BUFFER_SIZE_BMSK 0xffff
+
+/* EMAC_DESC_CTRL_6 */
+#define RRD_RING_SIZE_BMSK 0xfff
+
+/* EMAC_DESC_CTRL_9 */
+#define TPD_RING_SIZE_BMSK 0xffff
+
+/* EMAC_TXQ_CTRL_0 */
+#define NUM_TXF_BURST_PREF_BMSK 0xffff0000
+#define NUM_TXF_BURST_PREF_SHFT 16
+#define LS_8023_SP 0x80
+#define TXQ_MODE 0x40
+#define TXQ_EN 0x20
+#define IP_OP_SP 0x10
+#define NUM_TPD_BURST_PREF_BMSK 0xf
+#define NUM_TPD_BURST_PREF_SHFT 0
+
+/* EMAC_TXQ_CTRL_1 */
+#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff
+
+/* EMAC_TXQ_CTRL_2 */
+#define TXF_HWM_BMSK 0xfff0000
+#define TXF_LWM_BMSK 0xfff
+
+/* EMAC_RXQ_CTRL_0 */
+#define RXQ_EN BIT(31)
+#define CUT_THRU_EN BIT(30)
+#define RSS_HASH_EN BIT(29)
+#define NUM_RFD_BURST_PREF_BMSK 0x3f00000
+#define NUM_RFD_BURST_PREF_SHFT 20
+#define IDT_TABLE_SIZE_BMSK 0x1ff00
+#define IDT_TABLE_SIZE_SHFT 8
+#define SP_IPV6 0x80
+
+/* EMAC_RXQ_CTRL_1 */
+#define JUMBO_1KAH_BMSK 0xf000
+#define JUMBO_1KAH_SHFT 12
+#define RFD_PREF_LOW_TH 0x10
+#define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0
+#define RFD_PREF_LOW_THRESHOLD_SHFT 6
+#define RFD_PREF_UP_TH 0x10
+#define RFD_PREF_UP_THRESHOLD_BMSK 0x3f
+#define RFD_PREF_UP_THRESHOLD_SHFT 0
+
+/* EMAC_RXQ_CTRL_2 */
+#define RXF_DOF_THRESHOLD 0x1a0
+#define RXF_DOF_THRESHOLD_BMSK 0xfff0000
+#define RXF_DOF_THRESHOLD_SHFT 16
+#define RXF_UOF_THRESHOLD 0xbe
+#define RXF_UOF_THRESHOLD_BMSK 0xfff
+#define RXF_UOF_THRESHOLD_SHFT 0
+
+/* EMAC_RXQ_CTRL_3 */
+#define RXD_TIMER_BMSK 0xffff0000
+#define RXD_THRESHOLD_BMSK 0xfff
+#define RXD_THRESHOLD_SHFT 0
+
+/* EMAC_DMA_CTRL */
+#define DMAW_DLY_CNT_BMSK 0xf0000
+#define DMAW_DLY_CNT_SHFT 16
+#define DMAR_DLY_CNT_BMSK 0xf800
+#define DMAR_DLY_CNT_SHFT 11
+#define DMAR_REQ_PRI 0x400
+#define REGWRBLEN_BMSK 0x380
+#define REGWRBLEN_SHFT 7
+#define REGRDBLEN_BMSK 0x70
+#define REGRDBLEN_SHFT 4
+#define OUT_ORDER_MODE 0x4
+#define ENH_ORDER_MODE 0x2
+#define IN_ORDER_MODE 0x1
+
+/* EMAC_MAILBOX_13 */
+#define RFD3_PROC_IDX_BMSK 0xfff0000
+#define RFD3_PROC_IDX_SHFT 16
+#define RFD3_PROD_IDX_BMSK 0xfff
+#define RFD3_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_2 */
+#define NTPD_CONS_IDX_BMSK 0xffff0000
+#define NTPD_CONS_IDX_SHFT 16
+
+/* EMAC_MAILBOX_3 */
+#define RFD0_CONS_IDX_BMSK 0xfff
+#define RFD0_CONS_IDX_SHFT 0
+
+/* EMAC_MAILBOX_11 */
+#define H3TPD_PROD_IDX_BMSK 0xffff0000
+#define H3TPD_PROD_IDX_SHFT 16
+
+/* EMAC_AXI_MAST_CTRL */
+#define DATA_BYTE_SWAP 0x8
+#define MAX_BOUND 0x2
+#define MAX_BTYPE 0x1
+
+/* EMAC_MAILBOX_12 */
+#define H3TPD_CONS_IDX_BMSK 0xffff0000
+#define H3TPD_CONS_IDX_SHFT 16
+
+/* EMAC_MAILBOX_9 */
+#define H2TPD_PROD_IDX_BMSK 0xffff
+#define H2TPD_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_10 */
+#define H1TPD_CONS_IDX_BMSK 0xffff0000
+#define H1TPD_CONS_IDX_SHFT 16
+#define H2TPD_CONS_IDX_BMSK 0xffff
+#define H2TPD_CONS_IDX_SHFT 0
+
+/* EMAC_ATHR_HEADER_CTRL */
+#define HEADER_CNT_EN 0x2
+#define HEADER_ENABLE 0x1
+
+/* EMAC_MAILBOX_0 */
+#define RFD0_PROC_IDX_BMSK 0xfff0000
+#define RFD0_PROC_IDX_SHFT 16
+#define RFD0_PROD_IDX_BMSK 0xfff
+#define RFD0_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_5 */
+#define RFD1_PROC_IDX_BMSK 0xfff0000
+#define RFD1_PROC_IDX_SHFT 16
+#define RFD1_PROD_IDX_BMSK 0xfff
+#define RFD1_PROD_IDX_SHFT 0
+
+/* EMAC_MISC_CTRL */
+#define RX_UNCPL_INT_EN 0x1
+
+/* EMAC_MAILBOX_7 */
+#define RFD2_CONS_IDX_BMSK 0xfff0000
+#define RFD2_CONS_IDX_SHFT 16
+#define RFD1_CONS_IDX_BMSK 0xfff
+#define RFD1_CONS_IDX_SHFT 0
+
+/* EMAC_MAILBOX_8 */
+#define RFD3_CONS_IDX_BMSK 0xfff
+#define RFD3_CONS_IDX_SHFT 0
+
+/* EMAC_MAILBOX_15 */
+#define NTPD_PROD_IDX_BMSK 0xffff
+#define NTPD_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_16 */
+#define H1TPD_PROD_IDX_BMSK 0xffff
+#define H1TPD_PROD_IDX_SHFT 0
+
+#define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20
+#define RXQ0_RSS_HSTYP_IPV6_EN 0x10
+#define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8
+#define RXQ0_RSS_HSTYP_IPV4_EN 0x4
+
+/* EMAC_EMAC_WRAPPER_TX_TS_INX */
+#define EMAC_WRAPPER_TX_TS_EMPTY BIT(31)
+#define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff
+
+struct emac_skb_cb {
+ u32 tpd_idx;
+ unsigned long jiffies;
+};
+
+#define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb)
+#define EMAC_RSS_IDT_SIZE 256
+#define JUMBO_1KAH 0x4
+#define RXD_TH 0x100
+#define EMAC_TPD_LAST_FRAGMENT 0x80000000
+#define EMAC_TPD_TSTAMP_SAVE 0x80000000
+
+/* EMAC Errors in emac_rrd.word[3] */
+#define EMAC_RRD_L4F BIT(14)
+#define EMAC_RRD_IPF BIT(15)
+#define EMAC_RRD_CRC BIT(21)
+#define EMAC_RRD_FAE BIT(22)
+#define EMAC_RRD_TRN BIT(23)
+#define EMAC_RRD_RNT BIT(24)
+#define EMAC_RRD_INC BIT(25)
+#define EMAC_RRD_FOV BIT(29)
+#define EMAC_RRD_LEN BIT(30)
+
+/* Error bits that will result in a received frame being discarded */
+#define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \
+ EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \
+ EMAC_RRD_FOV | EMAC_RRD_LEN)
+#define EMAC_RRD_STATS_DW_IDX 3
+
+#define EMAC_RRD(RXQ, SIZE, IDX) ((RXQ)->rrd.v_addr + ((SIZE) * (IDX)))
+#define EMAC_RFD(RXQ, SIZE, IDX) ((RXQ)->rfd.v_addr + ((SIZE) * (IDX)))
+#define EMAC_TPD(TXQ, SIZE, IDX) ((TXQ)->tpd.v_addr + ((SIZE) * (IDX)))
+
+#define GET_RFD_BUFFER(RXQ, IDX) (&((RXQ)->rfd.rfbuff[(IDX)]))
+#define GET_TPD_BUFFER(RTQ, IDX) (&((RTQ)->tpd.tpbuff[(IDX)]))
+
+#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD 8
+
+#define ISR_RX_PKT (\
+ RX_PKT_INT0 |\
+ RX_PKT_INT1 |\
+ RX_PKT_INT2 |\
+ RX_PKT_INT3)
+
+#define EMAC_MAC_IRQ_RES "core0"
+
+void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
+{
+ u32 crc32, bit, reg, mta;
+
+ /* Calculate the CRC of the MAC address */
+ crc32 = ether_crc(ETH_ALEN, addr);
+
+ /* The HASH Table is an array of 2 32-bit registers. It is
+ * treated like an array of 64 bits (BitArray[hash_value]).
+ * Use the upper 6 bits of the above CRC as the hash value.
+ */
+ reg = (crc32 >> 31) & 0x1;
+ bit = (crc32 >> 26) & 0x1F;
+
+ mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
+ mta |= BIT(bit);
+ writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
+}
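+
+/* Hashing example for the function above: if the CRC's top six bits are
+ * 1 00101b, bit 31 selects EMAC_HASH_TAB_REG1 and bits 30..26 (value 5)
+ * set bit 5 of that register.
+ */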
+
+void emac_mac_multicast_addr_clear(struct emac_adapter *adpt)
+{
+ writel(0, adpt->base + EMAC_HASH_TAB_REG0);
+ writel(0, adpt->base + EMAC_HASH_TAB_REG1);
+}
+
+/* definitions for RSS */
+#define EMAC_RSS_KEY(_i, _type) \
+ (EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
+#define EMAC_RSS_TBL(_i, _type) \
+ (EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))
+
+/* Config MAC modes */
+void emac_mac_mode_config(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ u32 mac;
+
+ mac = readl(adpt->base + EMAC_MAC_CTRL);
+ mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ mac |= VLAN_STRIP;
+
+ if (netdev->flags & IFF_PROMISC)
+ mac |= PROM_MODE;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ mac |= MULTI_ALL;
+
+ writel(mac, adpt->base + EMAC_MAC_CTRL);
+}
+
+/* Config descriptor rings */
+static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
+{
+ static const unsigned short tpd_q_offset[] = {
+ EMAC_DESC_CTRL_8, EMAC_H1TPD_BASE_ADDR_LO,
+ EMAC_H2TPD_BASE_ADDR_LO, EMAC_H3TPD_BASE_ADDR_LO};
+ static const unsigned short rfd_q_offset[] = {
+ EMAC_DESC_CTRL_2, EMAC_DESC_CTRL_10,
+ EMAC_DESC_CTRL_12, EMAC_DESC_CTRL_13};
+ static const unsigned short rrd_q_offset[] = {
+ EMAC_DESC_CTRL_5, EMAC_DESC_CTRL_14,
+ EMAC_DESC_CTRL_15, EMAC_DESC_CTRL_16};
+
+ /* TPD (Transmit Packet Descriptor) */
+ writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
+ adpt->base + EMAC_DESC_CTRL_1);
+
+ writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
+ adpt->base + tpd_q_offset[0]);
+
+ writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_9);
+
+ /* RFD (Receive Free Descriptor) & RRD (Receive Return Descriptor) */
+ writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
+ adpt->base + EMAC_DESC_CTRL_0);
+
+ writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
+ adpt->base + rfd_q_offset[0]);
+ writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
+ adpt->base + rrd_q_offset[0]);
+
+ writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_3);
+ writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_6);
+
+ writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_4);
+
+ writel(0, adpt->base + EMAC_DESC_CTRL_11);
+
+ /* Load all of the base addresses above and ensure that triggering HW to
+ * read ring pointers is flushed
+ */
+ writel(1, adpt->base + EMAC_INTER_SRAM_PART9);
+}
+
+/* Config transmit parameters */
+static void emac_mac_tx_config(struct emac_adapter *adpt)
+{
+ u32 val;
+
+ writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) &
+ JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1);
+
+ val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
+ NUM_TPD_BURST_PREF_BMSK;
+
+ val |= TXQ_MODE | LS_8023_SP;
+ val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
+ NUM_TXF_BURST_PREF_BMSK;
+
+ writel(val, adpt->base + EMAC_TXQ_CTRL_0);
+ emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2,
+ (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
+}
+
+/* Config receive parameters */
+static void emac_mac_rx_config(struct emac_adapter *adpt)
+{
+ u32 val;
+
+ val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
+ NUM_RFD_BURST_PREF_BMSK;
+ val |= (SP_IPV6 | CUT_THRU_EN);
+
+ writel(val, adpt->base + EMAC_RXQ_CTRL_0);
+
+ val = readl(adpt->base + EMAC_RXQ_CTRL_1);
+ val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
+ RFD_PREF_UP_THRESHOLD_BMSK);
+ val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
+ (RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
+ (RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT);
+ writel(val, adpt->base + EMAC_RXQ_CTRL_1);
+
+ val = readl(adpt->base + EMAC_RXQ_CTRL_2);
+ val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
+ val |= (RXF_DOF_THRESHOLD << RXF_DOF_THRESHOLD_SHFT) |
+ (RXF_UOF_THRESHOLD << RXF_UOF_THRESHOLD_SHFT);
+ writel(val, adpt->base + EMAC_RXQ_CTRL_2);
+
+ val = readl(adpt->base + EMAC_RXQ_CTRL_3);
+ val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
+ val |= RXD_TH << RXD_THRESHOLD_SHFT;
+ writel(val, adpt->base + EMAC_RXQ_CTRL_3);
+}
+
+/* Config dma */
+static void emac_mac_dma_config(struct emac_adapter *adpt)
+{
+ u32 dma_ctrl = DMAR_REQ_PRI;
+
+ switch (adpt->dma_order) {
+ case emac_dma_ord_in:
+ dma_ctrl |= IN_ORDER_MODE;
+ break;
+ case emac_dma_ord_enh:
+ dma_ctrl |= ENH_ORDER_MODE;
+ break;
+ case emac_dma_ord_out:
+ dma_ctrl |= OUT_ORDER_MODE;
+ break;
+ default:
+ break;
+ }
+
+ dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) &
+ REGRDBLEN_BMSK;
+ dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) &
+ REGWRBLEN_BMSK;
+ dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
+ DMAR_DLY_CNT_BMSK;
+ dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
+ DMAW_DLY_CNT_BMSK;
+
+ /* config DMA and ensure that configuration is flushed to HW */
+ writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL);
+}
+
+/* set MAC address */
+static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
+{
+ u32 sta;
+
+ /* For example, for MAC address 00-A0-C6-11-22-33:
+ * STA_ADDR0 <- 0xC6112233, STA_ADDR1 <- 0x00A0.
+ */
+
+ /* low 32-bit word */
+ sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
+ (((u32)addr[4]) << 8) | (((u32)addr[5]));
+ writel(sta, adpt->base + EMAC_MAC_STA_ADDR0);
+
+ /* high 32-bit word */
+ sta = (((u32)addr[0]) << 8) | (u32)addr[1];
+ writel(sta, adpt->base + EMAC_MAC_STA_ADDR1);
+}
+
+static void emac_mac_config(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ unsigned int max_frame;
+ u32 val;
+
+ emac_set_mac_address(adpt, netdev->dev_addr);
+
+ max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ?
+ ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE;
+
+ emac_mac_dma_rings_config(adpt);
+
+ writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
+ adpt->base + EMAC_MAX_FRAM_LEN_CTRL);
+
+ emac_mac_tx_config(adpt);
+ emac_mac_rx_config(adpt);
+ emac_mac_dma_config(adpt);
+
+ val = readl(adpt->base + EMAC_AXI_MAST_CTRL);
+ val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
+ val |= MAX_BTYPE;
+ writel(val, adpt->base + EMAC_AXI_MAST_CTRL);
+ writel(0, adpt->base + EMAC_CLK_GATE_CTRL);
+ writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL);
+}
+
+void emac_mac_reset(struct emac_adapter *adpt)
+{
+ emac_mac_stop(adpt);
+
+ emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
+ usleep_range(100, 150); /* reset may take up to 100 usec */
+
+ /* interrupt clear-on-read */
+ emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
+}
+
+void emac_mac_start(struct emac_adapter *adpt)
+{
+ struct phy_device *phydev = adpt->phydev;
+ u32 mac, csr1;
+
+ /* enable tx queue */
+ emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN);
+
+ /* enable rx queue */
+ emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN);
+
+ /* enable mac control */
+ mac = readl(adpt->base + EMAC_MAC_CTRL);
+ csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
+
+ mac |= TXEN | RXEN; /* enable RX/TX */
+
+ /* We don't have ethtool support yet, so force flow-control mode
+ * to 'full' always.
+ */
+ mac |= TXFC | RXFC;
+
+ /* setup link speed */
+ mac &= ~SPEED_MASK;
+ if (phydev->speed == SPEED_1000) {
+ mac |= SPEED(2);
+ csr1 |= FREQ_MODE;
+ } else {
+ mac |= SPEED(1);
+ csr1 &= ~FREQ_MODE;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL)
+ mac |= FULLD;
+ else
+ mac &= ~FULLD;
+
+ /* other parameters */
+ mac |= (CRCE | PCRCE);
+ mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK);
+ mac |= BROAD_EN;
+ mac |= FLCHK;
+ mac &= ~RX_CHKSUM_EN;
+ mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
+ DEBUG_MODE | SINGLE_PAUSE_MODE);
+
+ writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
+
+ writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
+
+ /* enable interrupt read clear, low power sleep mode and
+ * the irq moderators
+ */
+
+ writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT);
+ writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN |
+ IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL);
+
+ emac_mac_mode_config(adpt);
+
+ emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
+ (HEADER_ENABLE | HEADER_CNT_EN), 0);
+
+ emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN);
+}
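+
+/* In emac_mac_start() above, SPEED(2) is used for 1000 Mb/s links and
+ * SPEED(1) for 10/100; FREQ_MODE in the wrapper CSR follows the same
+ * choice.
+ */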
+
+void emac_mac_stop(struct emac_adapter *adpt)
+{
+ emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0);
+ emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0);
+ emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0);
+ usleep_range(1000, 1050); /* stopping the MAC may take up to 1 msec */
+}
+
+/* Free all descriptors of given transmit queue */
+static void emac_tx_q_descs_free(struct emac_adapter *adpt)
+{
+ struct emac_tx_queue *tx_q = &adpt->tx_q;
+ unsigned int i;
+ size_t size;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_q->tpd.tpbuff)
+ return;
+
+ for (i = 0; i < tx_q->tpd.count; i++) {
+ struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i);
+
+ if (tpbuf->dma_addr) {
+ dma_unmap_single(adpt->netdev->dev.parent,
+ tpbuf->dma_addr, tpbuf->length,
+ DMA_TO_DEVICE);
+ tpbuf->dma_addr = 0;
+ }
+ if (tpbuf->skb) {
+ dev_kfree_skb_any(tpbuf->skb);
+ tpbuf->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct emac_buffer) * tx_q->tpd.count;
+ memset(tx_q->tpd.tpbuff, 0, size);
+
+ /* clear the descriptor ring */
+ memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);
+
+ tx_q->tpd.consume_idx = 0;
+ tx_q->tpd.produce_idx = 0;
+}
+
+/* Free all descriptors of given receive queue */
+static void emac_rx_q_free_descs(struct emac_adapter *adpt)
+{
+ struct device *dev = adpt->netdev->dev.parent;
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+ unsigned int i;
+ size_t size;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_q->rfd.rfbuff)
+ return;
+
+ for (i = 0; i < rx_q->rfd.count; i++) {
+ struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i);
+
+ if (rfbuf->dma_addr) {
+ dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length,
+ DMA_FROM_DEVICE);
+ rfbuf->dma_addr = 0;
+ }
+ if (rfbuf->skb) {
+ dev_kfree_skb(rfbuf->skb);
+ rfbuf->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct emac_buffer) * rx_q->rfd.count;
+ memset(rx_q->rfd.rfbuff, 0, size);
+
+ /* clear the descriptor rings */
+ memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size);
+ rx_q->rrd.produce_idx = 0;
+ rx_q->rrd.consume_idx = 0;
+
+ memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
+ rx_q->rfd.produce_idx = 0;
+ rx_q->rfd.consume_idx = 0;
+}
+
+/* Free all buffers associated with given transmit queue */
+static void emac_tx_q_bufs_free(struct emac_adapter *adpt)
+{
+ struct emac_tx_queue *tx_q = &adpt->tx_q;
+
+ emac_tx_q_descs_free(adpt);
+
+ kfree(tx_q->tpd.tpbuff);
+ tx_q->tpd.tpbuff = NULL;
+ tx_q->tpd.v_addr = NULL;
+ tx_q->tpd.dma_addr = 0;
+ tx_q->tpd.size = 0;
+}
+
+/* Allocate TX descriptor ring for the given transmit queue */
+static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ size_t size;
+
+ size = sizeof(struct emac_buffer) * tx_q->tpd.count;
+ tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL);
+ if (!tx_q->tpd.tpbuff)
+ return -ENOMEM;
+
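+ /* All three descriptor rings live in one shared coherent DMA block
+ * (ring_header); each ring is carved from it at the next
+ * 8-byte-aligned offset.
+ */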
+ tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
+ tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
+ tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
+ ring_header->used += ALIGN(tx_q->tpd.size, 8);
+ tx_q->tpd.produce_idx = 0;
+ tx_q->tpd.consume_idx = 0;
+
+ return 0;
+}
+
+/* Free all buffers associated with given receive queue */
+static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
+{
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+
+ emac_rx_q_free_descs(adpt);
+
+ kfree(rx_q->rfd.rfbuff);
+ rx_q->rfd.rfbuff = NULL;
+
+ rx_q->rfd.v_addr = NULL;
+ rx_q->rfd.dma_addr = 0;
+ rx_q->rfd.size = 0;
+
+ rx_q->rrd.v_addr = NULL;
+ rx_q->rrd.dma_addr = 0;
+ rx_q->rrd.size = 0;
+}
+
+/* Allocate RX descriptor rings for the given receive queue */
+static int emac_rx_descs_alloc(struct emac_adapter *adpt)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+ size_t size;
+
+ size = sizeof(struct emac_buffer) * rx_q->rfd.count;
+ rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL);
+ if (!rx_q->rfd.rfbuff)
+ return -ENOMEM;
+
+ rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
+ rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);
+
+ rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
+ rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used;
+ ring_header->used += ALIGN(rx_q->rrd.size, 8);
+
+ rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
+ rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used;
+ ring_header->used += ALIGN(rx_q->rfd.size, 8);
+
+ rx_q->rrd.produce_idx = 0;
+ rx_q->rrd.consume_idx = 0;
+
+ rx_q->rfd.produce_idx = 0;
+ rx_q->rfd.consume_idx = 0;
+
+ return 0;
+}
+
+/* Allocate all TX and RX descriptor rings */
+int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ struct device *dev = adpt->netdev->dev.parent;
+ unsigned int num_tx_descs = adpt->tx_desc_cnt;
+ unsigned int num_rx_descs = adpt->rx_desc_cnt;
+ int ret;
+
+ adpt->tx_q.tpd.count = adpt->tx_desc_cnt;
+
+ adpt->rx_q.rrd.count = adpt->rx_desc_cnt;
+ adpt->rx_q.rfd.count = adpt->rx_desc_cnt;
+
+ /* Ring DMA buffer. Each ring may need up to 8 bytes for alignment,
+ * hence the additional padding bytes are allocated.
+ */
+ ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
+ num_rx_descs * (adpt->rfd_size * 4) +
+ num_rx_descs * (adpt->rrd_size * 4) +
+ 8 + 2 * 8; /* 8 bytes of padding for each of one Tx and two Rx rings */
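+ /* Illustrative example (assumed descriptor sizes): with 128 TPDs of
+ * 16 bytes, 256 RFDs of 8 bytes and 256 RRDs of 24 bytes, this
+ * reserves 2048 + 2048 + 6144 + 24 = 10264 bytes, of which up to
+ * 8 bytes per ring may be spent on alignment.
+ */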
+
+ ring_header->used = 0;
+ ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
+ &ring_header->dma_addr,
+ GFP_KERNEL);
+ if (!ring_header->v_addr)
+ return -ENOMEM;
+
+ ring_header->used = ALIGN(ring_header->dma_addr, 8) -
+ ring_header->dma_addr;
+
+ ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
+ if (ret) {
+ netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
+ goto err_alloc_tx;
+ }
+
+ ret = emac_rx_descs_alloc(adpt);
+ if (ret) {
+ netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
+ goto err_alloc_rx;
+ }
+
+ return 0;
+
+err_alloc_rx:
+ emac_tx_q_bufs_free(adpt);
+err_alloc_tx:
+ dma_free_coherent(dev, ring_header->size,
+ ring_header->v_addr, ring_header->dma_addr);
+
+ ring_header->v_addr = NULL;
+ ring_header->dma_addr = 0;
+ ring_header->size = 0;
+ ring_header->used = 0;
+
+ return ret;
+}
+
+/* Free all TX and RX descriptor rings */
+void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ struct device *dev = adpt->netdev->dev.parent;
+
+ emac_tx_q_bufs_free(adpt);
+ emac_rx_q_bufs_free(adpt);
+
+ dma_free_coherent(dev, ring_header->size,
+ ring_header->v_addr, ring_header->dma_addr);
+
+ ring_header->v_addr = NULL;
+ ring_header->dma_addr = 0;
+ ring_header->size = 0;
+ ring_header->used = 0;
+}
+
+/* Initialize descriptor rings */
+static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt)
+{
+ unsigned int i;
+
+ adpt->tx_q.tpd.produce_idx = 0;
+ adpt->tx_q.tpd.consume_idx = 0;
+ for (i = 0; i < adpt->tx_q.tpd.count; i++)
+ adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;
+
+ adpt->rx_q.rrd.produce_idx = 0;
+ adpt->rx_q.rrd.consume_idx = 0;
+ adpt->rx_q.rfd.produce_idx = 0;
+ adpt->rx_q.rfd.consume_idx = 0;
+ for (i = 0; i < adpt->rx_q.rfd.count; i++)
+ adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
+}
+
+/* Produce new receive free descriptor */
+static void emac_mac_rx_rfd_create(struct emac_adapter *adpt,
+ struct emac_rx_queue *rx_q,
+ dma_addr_t addr)
+{
+ u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);
+
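+ /* e.g. (illustrative) addr = 0x0000000123456780 is stored as the
+ * two 32-bit words 0x23456780 and 0x00000001
+ */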
+ *(hw_rfd++) = lower_32_bits(addr);
+ *hw_rfd = upper_32_bits(addr);
+
+ if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
+ rx_q->rfd.produce_idx = 0;
+}
+
+/* Fill up receive queue's RFD with preallocated receive buffers */
+static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
+ struct emac_rx_queue *rx_q)
+{
+ struct emac_buffer *curr_rxbuf;
+ struct emac_buffer *next_rxbuf;
+ unsigned int count = 0;
+ u32 next_produce_idx;
+
+ next_produce_idx = rx_q->rfd.produce_idx + 1;
+ if (next_produce_idx == rx_q->rfd.count)
+ next_produce_idx = 0;
+
+ curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
+ next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
+
+ /* the ring always keeps at least one blank rx_buffer */
+ while (!next_rxbuf->dma_addr) {
+ struct sk_buff *skb;
+ int ret;
+
+ skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
+ if (!skb)
+ break;
+
+ /* set the buffer length before mapping so the DMA mapping
+ * covers the full receive buffer
+ */
+ curr_rxbuf->length = adpt->rxbuf_size;
+ curr_rxbuf->dma_addr =
+ dma_map_single(adpt->netdev->dev.parent, skb->data,
+ curr_rxbuf->length, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ curr_rxbuf->dma_addr);
+ if (ret) {
+ dev_kfree_skb(skb);
+ break;
+ }
+ curr_rxbuf->skb = skb;
+
+ emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr);
+ next_produce_idx = rx_q->rfd.produce_idx + 1;
+ if (next_produce_idx == rx_q->rfd.count)
+ next_produce_idx = 0;
+
+ curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
+ next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
+ count++;
+ }
+
+ if (count) {
+ u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
+ rx_q->produce_mask;
+ emac_reg_update32(adpt->base + rx_q->produce_reg,
+ rx_q->produce_mask, prod_idx);
+ }
+}
+
+static void emac_adjust_link(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ if (phydev->link)
+ emac_mac_start(adpt);
+ else
+ emac_mac_stop(adpt);
+
+ phy_print_status(phydev);
+}
+
+/* Bringup the interface/HW */
+int emac_mac_up(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ struct emac_irq *irq = &adpt->irq;
+ int ret;
+
+ emac_mac_rx_tx_ring_reset_all(adpt);
+ emac_mac_config(adpt);
+
+ ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq);
+ if (ret) {
+ netdev_err(adpt->netdev, "could not request %s irq\n",
+ EMAC_MAC_IRQ_RES);
+ return ret;
+ }
+
+ emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
+
+ ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+ netdev_err(adpt->netdev, "could not connect phy\n");
+ free_irq(irq->irq, irq);
+ return ret;
+ }
+
+ /* enable mac irq */
+ writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
+ writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
+
+ adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+ phy_start(adpt->phydev);
+
+ napi_enable(&adpt->rx_q.napi);
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+/* Bring down the interface/HW */
+void emac_mac_down(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+
+ netif_stop_queue(netdev);
+ napi_disable(&adpt->rx_q.napi);
+
+ phy_stop(adpt->phydev);
+ phy_disconnect(adpt->phydev);
+
+ /* disable mac irq */
+ writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
+ writel(0, adpt->base + EMAC_INT_MASK);
+ synchronize_irq(adpt->irq.irq);
+ free_irq(adpt->irq.irq, &adpt->irq);
+
+ emac_mac_reset(adpt);
+
+ emac_tx_q_descs_free(adpt);
+ netdev_reset_queue(adpt->netdev);
+ emac_rx_q_free_descs(adpt);
+}
+
+/* Consume next received packet descriptor */
+static bool emac_rx_process_rrd(struct emac_adapter *adpt,
+ struct emac_rx_queue *rx_q,
+ struct emac_rrd *rrd)
+{
+ u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx);
+
+ rrd->word[3] = *(hw_rrd + 3);
+
+ if (!RRD_UPDT(rrd))
+ return false;
+
+ rrd->word[4] = 0;
+ rrd->word[5] = 0;
+
+ rrd->word[0] = *(hw_rrd++);
+ rrd->word[1] = *(hw_rrd++);
+ rrd->word[2] = *(hw_rrd++);
+
+ if (unlikely(RRD_NOR(rrd) != 1)) {
+ netdev_err(adpt->netdev,
+ "error: multi-RFD not support yet! nor:%lu\n",
+ RRD_NOR(rrd));
+ }
+
+ /* mark rrd as processed */
+ RRD_UPDT_SET(rrd, 0);
+ *hw_rrd = rrd->word[3];
+
+ if (++rx_q->rrd.consume_idx == rx_q->rrd.count)
+ rx_q->rrd.consume_idx = 0;
+
+ return true;
+}
+
+/* Produce new transmit descriptor */
+static void emac_tx_tpd_create(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
+{
+ u32 *hw_tpd;
+
+ tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
+ hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);
+
+ if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
+ tx_q->tpd.produce_idx = 0;
+
+ *(hw_tpd++) = tpd->word[0];
+ *(hw_tpd++) = tpd->word[1];
+ *(hw_tpd++) = tpd->word[2];
+ *hw_tpd = tpd->word[3];
+}
+
+/* Mark the last transmit descriptor as such (for the transmit packet) */
+static void emac_tx_tpd_mark_last(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q)
+{
+ u32 *hw_tpd =
+ EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
+ u32 tmp_tpd;
+
+ tmp_tpd = *(hw_tpd + 1);
+ tmp_tpd |= EMAC_TPD_LAST_FRAGMENT;
+ *(hw_tpd + 1) = tmp_tpd;
+}
+
+static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd)
+{
+ struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
+ u32 consume_idx = RRD_SI(rrd);
+ unsigned int i;
+
+ for (i = 0; i < RRD_NOR(rrd); i++) {
+ rfbuf[consume_idx].skb = NULL;
+ if (++consume_idx == rx_q->rfd.count)
+ consume_idx = 0;
+ }
+
+ rx_q->rfd.consume_idx = consume_idx;
+ rx_q->rfd.process_idx = consume_idx;
+}
+
+/* Push the received skb to upper layers */
+static void emac_receive_skb(struct emac_rx_queue *rx_q,
+ struct sk_buff *skb,
+ u16 vlan_tag, bool vlan_flag)
+{
+ if (vlan_flag) {
+ u16 vlan;
+
+ EMAC_TAG_TO_VLAN(vlan_tag, vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ }
+
+ napi_gro_receive(&rx_q->napi, skb);
+}
+
+/* Process receive event */
+void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
+ int *num_pkts, int max_pkts)
+{
+ u32 proc_idx, hw_consume_idx, num_consume_pkts;
+ struct net_device *netdev = adpt->netdev;
+ struct emac_buffer *rfbuf;
+ unsigned int count = 0;
+ struct emac_rrd rrd;
+ struct sk_buff *skb;
+ u32 reg;
+
+ reg = readl_relaxed(adpt->base + rx_q->consume_reg);
+
+ hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift;
+ num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ?
+ (hw_consume_idx - rx_q->rrd.consume_idx) :
+ (hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx);
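+ /* e.g. (illustrative) with count = 256, a software consume_idx of
+ * 250 and a hardware index of 4, 4 + 256 - 250 = 10 return
+ * descriptors are pending
+ */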
+
+ do {
+ if (!num_consume_pkts)
+ break;
+
+ if (!emac_rx_process_rrd(adpt, rx_q, &rrd))
+ break;
+
+ if (likely(RRD_NOR(&rrd) == 1)) {
+ /* good receive */
+ rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd));
+ dma_unmap_single(adpt->netdev->dev.parent,
+ rfbuf->dma_addr, rfbuf->length,
+ DMA_FROM_DEVICE);
+ rfbuf->dma_addr = 0;
+ skb = rfbuf->skb;
+ } else {
+ netdev_err(adpt->netdev,
+ "error: multi-RFD not support yet!\n");
+ break;
+ }
+ emac_rx_rfd_clean(rx_q, &rrd);
+ num_consume_pkts--;
+ count++;
+
+ /* Due to a HW issue in L4 checksum detection (UDP/TCP frags
+ * with DF set are marked as error), drop packets based on the
+ * error mask rather than the summary bit (ignoring L4F errors)
+ */
+ if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) {
+ netif_dbg(adpt, rx_status, adpt->netdev,
+ "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n",
+ rrd.word[0], rrd.word[1],
+ rrd.word[2], rrd.word[3]);
+
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = RRD_L4F(&rrd) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
+ (bool)RRD_CVTAG(&rrd));
+
+ netdev->last_rx = jiffies;
+ (*num_pkts)++;
+ } while (*num_pkts < max_pkts);
+
+ if (count) {
+ proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
+ rx_q->process_mask;
+ emac_reg_update32(adpt->base + rx_q->process_reg,
+ rx_q->process_mask, proc_idx);
+ emac_mac_rx_descs_refill(adpt, rx_q);
+ }
+}
+
+/* Get the number of free transmit descriptors */
+static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q)
+{
+ u32 produce_idx = tx_q->tpd.produce_idx;
+ u32 consume_idx = tx_q->tpd.consume_idx;
+
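+ /* Illustrative example: with count = 512, produce_idx = 10 and
+ * consume_idx = 5, this yields 512 + 5 - 10 - 1 = 506 free slots;
+ * one slot is always left unused so produce_idx == consume_idx
+ * unambiguously means "ring empty" rather than "ring full".
+ */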
+ return (consume_idx > produce_idx) ?
+ (consume_idx - produce_idx - 1) :
+ (tx_q->tpd.count + consume_idx - produce_idx - 1);
+}
+
+/* Process transmit event */
+void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
+{
+ u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg);
+ u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0;
+ struct emac_buffer *tpbuf;
+
+ hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift;
+
+ while (tx_q->tpd.consume_idx != hw_consume_idx) {
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
+ if (tpbuf->dma_addr) {
+ dma_unmap_single(adpt->netdev->dev.parent,
+ tpbuf->dma_addr, tpbuf->length,
+ DMA_TO_DEVICE);
+ tpbuf->dma_addr = 0;
+ }
+
+ if (tpbuf->skb) {
+ pkts_compl++;
+ bytes_compl += tpbuf->skb->len;
+ dev_kfree_skb_irq(tpbuf->skb);
+ tpbuf->skb = NULL;
+ }
+
+ if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
+ tx_q->tpd.consume_idx = 0;
+ }
+
+ netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl);
+
+ if (netif_queue_stopped(adpt->netdev))
+ if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(adpt->netdev);
+}
+
+/* Initialize all queue data structures */
+void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ adpt->rx_q.netdev = adpt->netdev;
+
+ adpt->rx_q.produce_reg = EMAC_MAILBOX_0;
+ adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK;
+ adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT;
+
+ adpt->rx_q.process_reg = EMAC_MAILBOX_0;
+ adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK;
+ adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT;
+
+ adpt->rx_q.consume_reg = EMAC_MAILBOX_3;
+ adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK;
+ adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT;
+
+ adpt->rx_q.irq = &adpt->irq;
+ adpt->rx_q.intr = adpt->irq.mask & ISR_RX_PKT;
+
+ adpt->tx_q.produce_reg = EMAC_MAILBOX_15;
+ adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK;
+ adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT;
+
+ adpt->tx_q.consume_reg = EMAC_MAILBOX_2;
+ adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK;
+ adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT;
+}
+
+/* Fill up transmit descriptors with TSO and Checksum offload information */
+static int emac_tso_csum(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb,
+ struct emac_tpd *tpd)
+{
+ unsigned int hdr_len;
+ int ret;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
+ + ntohs(ip_hdr(skb)->tot_len);
+ if (skb->len > pkt_len)
+ pskb_trim(skb, pkt_len);
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (unlikely(skb->len == hdr_len)) {
+ /* we only need to do csum */
+ netif_warn(adpt, tx_err, adpt->netdev,
+ "tso not needed for packet with 0 data\n");
+ goto do_csum;
+ }
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ TPD_IPV4_SET(tpd, 1);
+ }
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ /* IPv6 TSO needs an extra TPD */
+ struct emac_tpd extra_tpd;
+
+ memset(tpd, 0, sizeof(*tpd));
+ memset(&extra_tpd, 0, sizeof(extra_tpd));
+
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ TPD_PKT_LEN_SET(&extra_tpd, skb->len);
+ TPD_LSO_SET(&extra_tpd, 1);
+ TPD_LSOV_SET(&extra_tpd, 1);
+ emac_tx_tpd_create(adpt, tx_q, &extra_tpd);
+ TPD_LSOV_SET(tpd, 1);
+ }
+
+ TPD_LSO_SET(tpd, 1);
+ TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
+ TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
+ return 0;
+ }
+
+do_csum:
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ unsigned int css, cso;
+
+ cso = skb_transport_offset(skb);
+ if (unlikely(cso & 0x1)) {
+ netdev_err(adpt->netdev,
+ "error: payload offset should be even\n");
+ return -EINVAL;
+ }
+ css = cso + skb->csum_offset;
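+ /* Illustrative example (assuming an Ethernet frame): for TCP in
+ * IPv4 with no options, cso = 14 (Ethernet) + 20 (IP) = 34 bytes
+ * and css = 34 + 16 = 50 bytes; both are programmed below as
+ * 16-bit word offsets, i.e. 17 and 25.
+ */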
+
+ TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
+ TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
+ TPD_CSX_SET(tpd, 1);
+ }
+
+ return 0;
+}
+
+/* Fill up transmit descriptors */
+static void emac_tx_fill_tpd(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q, struct sk_buff *skb,
+ struct emac_tpd *tpd)
+{
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int first = tx_q->tpd.produce_idx;
+ unsigned int len = skb_headlen(skb);
+ struct emac_buffer *tpbuf = NULL;
+ unsigned int mapped_len = 0;
+ unsigned int i;
+ int count = 0;
+ int ret;
+
+ /* if Large Segment Offload is enabled (LSO bit set in the TPD by
+ * emac_tso_csum)
+ */
+ if (TPD_LSO(tpd)) {
+ mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+ tpbuf->length = mapped_len;
+ tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
+ skb->data, tpbuf->length,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ tpbuf->dma_addr);
+ if (ret)
+ goto error;
+
+ TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+ TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+ TPD_BUF_LEN_SET(tpd, tpbuf->length);
+ emac_tx_tpd_create(adpt, tx_q, tpd);
+ count++;
+ }
+
+ if (mapped_len < len) {
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+ tpbuf->length = len - mapped_len;
+ tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
+ skb->data + mapped_len,
+ tpbuf->length, DMA_TO_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ tpbuf->dma_addr);
+ if (ret)
+ goto error;
+
+ TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+ TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+ TPD_BUF_LEN_SET(tpd, tpbuf->length);
+ emac_tx_tpd_create(adpt, tx_q, tpd);
+ count++;
+ }
+
+ for (i = 0; i < nr_frags; i++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+ tpbuf->length = frag->size;
+ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+ frag->page.p, frag->page_offset,
+ tpbuf->length, DMA_TO_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ tpbuf->dma_addr);
+ if (ret)
+ goto error;
+
+ TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+ TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+ TPD_BUF_LEN_SET(tpd, tpbuf->length);
+ emac_tx_tpd_create(adpt, tx_q, tpd);
+ count++;
+ }
+
+ /* The last tpd */
+ wmb();
+ emac_tx_tpd_mark_last(adpt, tx_q);
+
+ /* The last buffer info contains the skb address,
+ * so the skb is freed after it is unmapped
+ */
+ tpbuf->skb = skb;
+
+ return;
+
+error:
+ /* One of the memory mappings failed, so undo everything */
+ tx_q->tpd.produce_idx = first;
+
+ while (count--) {
+ tpbuf = GET_TPD_BUFFER(tx_q, first);
+ dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr,
+ tpbuf->length, DMA_TO_DEVICE);
+ tpbuf->dma_addr = 0;
+ tpbuf->length = 0;
+
+ if (++first == tx_q->tpd.count)
+ first = 0;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+/* Transmit the packet using specified transmit queue */
+int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
+ struct sk_buff *skb)
+{
+ struct emac_tpd tpd;
+ u32 prod_idx;
+
+ memset(&tpd, 0, sizeof(tpd));
+
+ if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ u16 tag;
+
+ EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
+ TPD_CVLAN_TAG_SET(&tpd, tag);
+ TPD_INSTC_SET(&tpd, 1);
+ }
+
+ if (skb_network_offset(skb) != ETH_HLEN)
+ TPD_TYP_SET(&tpd, 1);
+
+ emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
+
+ netdev_sent_queue(adpt->netdev, skb->len);
+
+ /* Make sure there are enough free descriptors to hold one
+ * maximum-sized SKB. We need one desc for each fragment,
+ * one for the checksum (emac_tso_csum), one for TSO,
+ * and one for the SKB header.
+ */
+ if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
+ netif_stop_queue(adpt->netdev);
+
+ /* update produce idx */
+ prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
+ tx_q->produce_mask;
+ emac_reg_update32(adpt->base + tx_q->produce_reg,
+ tx_q->produce_mask, prod_idx);
+
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
new file mode 100644
index 000000000000..f3aa24dc4a29
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* EMAC DMA HW engine uses three rings:
+ * Tx:
+ * TPD: Transmit Packet Descriptor ring.
+ * Rx:
+ * RFD: Receive Free Descriptor ring.
+ * Ring of descriptors with empty buffers to be filled by Rx HW.
+ * RRD: Receive Return Descriptor ring.
+ * Ring of descriptors with buffers filled with received data.
+ */
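+/* A typical receive under this scheme (illustrative walk-through): the
+ * driver posts empty buffers on the RFD ring, the hardware DMAs a frame
+ * into one of them and writes a matching RRD entry, and the driver then
+ * consumes the RRD to locate and hand off the filled buffer (see
+ * emac_mac_rx_process()).
+ */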
+
+#ifndef _EMAC_HW_H_
+#define _EMAC_HW_H_
+
+/* EMAC_CSR register offsets */
+#define EMAC_EMAC_WRAPPER_CSR1 0x000000
+#define EMAC_EMAC_WRAPPER_CSR2 0x000004
+#define EMAC_EMAC_WRAPPER_TX_TS_LO 0x000104
+#define EMAC_EMAC_WRAPPER_TX_TS_HI 0x000108
+#define EMAC_EMAC_WRAPPER_TX_TS_INX 0x00010c
+
+/* DMA Order Settings */
+enum emac_dma_order {
+ emac_dma_ord_in = 1,
+ emac_dma_ord_enh = 2,
+ emac_dma_ord_out = 4
+};
+
+enum emac_dma_req_block {
+ emac_dma_req_128 = 0,
+ emac_dma_req_256 = 1,
+ emac_dma_req_512 = 2,
+ emac_dma_req_1024 = 3,
+ emac_dma_req_2048 = 4,
+ emac_dma_req_4096 = 5
+};
+
+/* Return the value of bits lo..hi of little-endian val */
+#define BITS_GET(val, lo, hi) ((le32_to_cpu(val) & GENMASK((hi), (lo))) >> lo)
+#define BITS_SET(val, lo, hi, new_val) \
+ val = cpu_to_le32((le32_to_cpu(val) & (~GENMASK((hi), (lo)))) | \
+ (((new_val) << (lo)) & GENMASK((hi), (lo))))
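+/* For instance (illustrative), RRD_SI() below expands to
+ * BITS_GET((rrd)->word[0], 20, 31), extracting bits 31:20 of the
+ * little-endian word 0, i.e. the start consumer index of the RFD ring.
+ */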
+
+/* RRD (Receive Return Descriptor) */
+struct emac_rrd {
+ u32 word[6];
+
+/* number of RFD */
+#define RRD_NOR(rrd) BITS_GET((rrd)->word[0], 16, 19)
+/* start consumer index of rfd-ring */
+#define RRD_SI(rrd) BITS_GET((rrd)->word[0], 20, 31)
+/* vlan-tag (CVID, CFI and PRI) */
+#define RRD_CVALN_TAG(rrd) BITS_GET((rrd)->word[2], 0, 15)
+/* length of the packet */
+#define RRD_PKT_SIZE(rrd) BITS_GET((rrd)->word[3], 0, 13)
+/* L4(TCP/UDP) checksum failed */
+#define RRD_L4F(rrd) BITS_GET((rrd)->word[3], 14, 14)
+/* vlan tagged */
+#define RRD_CVTAG(rrd) BITS_GET((rrd)->word[3], 16, 16)
+/* When set, indicates that the descriptor is updated by the IP core.
+ * When cleared, indicates that the descriptor is invalid.
+ */
+#define RRD_UPDT(rrd) BITS_GET((rrd)->word[3], 31, 31)
+#define RRD_UPDT_SET(rrd, val) BITS_SET((rrd)->word[3], 31, 31, val)
+/* timestamp low */
+#define RRD_TS_LOW(rrd) BITS_GET((rrd)->word[4], 0, 29)
+/* timestamp high */
+#define RRD_TS_HI(rrd) le32_to_cpu((rrd)->word[5])
+};
+
+/* TPD (Transmit Packet Descriptor) */
+struct emac_tpd {
+ u32 word[4];
+
+/* Number of bytes of the transmit packet (includes the 4-byte CRC) */
+#define TPD_BUF_LEN_SET(tpd, val) BITS_SET((tpd)->word[0], 0, 15, val)
+/* Custom Checksum Offload: When set, ask IP core to offload custom checksum */
+#define TPD_CSX_SET(tpd, val) BITS_SET((tpd)->word[1], 8, 8, val)
+/* TCP Large Send Offload: When set, ask the IP core to offload TCP Large Send */
+#define TPD_LSO(tpd) BITS_GET((tpd)->word[1], 12, 12)
+#define TPD_LSO_SET(tpd, val) BITS_SET((tpd)->word[1], 12, 12, val)
+/* Large Send Offload Version: When set, indicates this is an LSOv2
+ * (for both IPv4 and IPv6). When cleared, indicates this is an LSOv1
+ * (only for IPv4).
+ */
+#define TPD_LSOV_SET(tpd, val) BITS_SET((tpd)->word[1], 13, 13, val)
+/* IPv4 packet: When set, indicates this is an IPv4 packet; this bit is only
+ * for LSOV2 format.
+ */
+#define TPD_IPV4_SET(tpd, val) BITS_SET((tpd)->word[1], 16, 16, val)
+/* 0: Ethernet frame (DA+SA+TYPE+DATA+CRC)
+ * 1: IEEE 802.3 frame (DA+SA+LEN+DSAP+SSAP+CTL+ORG+TYPE+DATA+CRC)
+ */
+#define TPD_TYP_SET(tpd, val) BITS_SET((tpd)->word[1], 17, 17, val)
+/* Low-32bit Buffer Address */
+#define TPD_BUFFER_ADDR_L_SET(tpd, val) ((tpd)->word[2] = cpu_to_le32(val))
+/* CVLAN Tag to be inserted if INS_VLAN_TAG is set, CVLAN TPID based on global
+ * register configuration.
+ */
+#define TPD_CVLAN_TAG_SET(tpd, val) BITS_SET((tpd)->word[3], 0, 15, val)
+/* Insert CVLAN tag: When set, ask the MAC to insert a CVLAN tag into the
+ * outgoing packet
+ */
+#define TPD_INSTC_SET(tpd, val) BITS_SET((tpd)->word[3], 17, 17, val)
+/* High-14bit Buffer Address, so the 64-bit address is
+ * {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
+ */
+#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 30, val)
+/* Format D. Word offset from the 1st byte of this packet at which to start
+ * calculating the custom checksum.
+ */
+#define TPD_PAYLOAD_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
+/* Format D. Word offset from the 1st byte of this packet at which to fill in
+ * the custom checksum
+ */
+#define TPD_CXSUM_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 18, 25, val)
+
+/* Format C. TCP Header offset from the 1st byte of this packet. (byte unit) */
+#define TPD_TCPHDR_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
+/* Format C. MSS (Maximum Segment Size) got from the protocol layer. (byte unit)
+ */
+#define TPD_MSS_SET(tpd, val) BITS_SET((tpd)->word[1], 18, 30, val)
+/* packet length in ext tpd */
+#define TPD_PKT_LEN_SET(tpd, val) ((tpd)->word[2] = cpu_to_le32(val))
+};
+
+/* emac_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd)
+ */
+struct emac_ring_header {
+ void *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* dma address */
+ size_t size; /* length in bytes */
+ size_t used;
+};
+
+/* emac_buffer is a wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct emac_buffer {
+ struct sk_buff *skb; /* socket buffer */
+ u16 length; /* rx buffer length */
+ dma_addr_t dma_addr; /* dma address */
+};
+
+/* receive free descriptor (rfd) ring */
+struct emac_rfd_ring {
+ struct emac_buffer *rfbuff;
+ u32 *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* dma address */
+ size_t size; /* length in bytes */
+ unsigned int count; /* number of desc in the ring */
+ unsigned int produce_idx;
+ unsigned int process_idx;
+ unsigned int consume_idx; /* unused */
+};
+
+/* Receive Return Descriptor (RRD) ring */
+struct emac_rrd_ring {
+ u32 *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* physical address */
+ size_t size; /* length in bytes */
+ unsigned int count; /* number of desc in the ring */
+ unsigned int produce_idx; /* unused */
+ unsigned int consume_idx;
+};
+
+/* Rx queue */
+struct emac_rx_queue {
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct emac_rrd_ring rrd;
+ struct emac_rfd_ring rfd;
+ struct napi_struct napi;
+ struct emac_irq *irq;
+
+ u32 intr;
+ u32 produce_mask;
+ u32 process_mask;
+ u32 consume_mask;
+
+ u16 produce_reg;
+ u16 process_reg;
+ u16 consume_reg;
+
+ u8 produce_shift;
+ u8 process_shft;
+ u8 consume_shift;
+};
+
+/* Transmit Packet Descriptor (tpd) ring */
+struct emac_tpd_ring {
+ struct emac_buffer *tpbuff;
+ u32 *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* dma address */
+
+ size_t size; /* length in bytes */
+ unsigned int count; /* number of desc in the ring */
+ unsigned int produce_idx;
+ unsigned int consume_idx;
+ unsigned int last_produce_idx;
+};
+
+/* Tx queue */
+struct emac_tx_queue {
+ struct emac_tpd_ring tpd;
+
+ u32 produce_mask;
+ u32 consume_mask;
+
+ u16 max_packets; /* max packets per interrupt */
+ u16 produce_reg;
+ u16 consume_reg;
+
+ u8 produce_shift;
+ u8 consume_shift;
+};
+
+struct emac_adapter;
+
+int emac_mac_up(struct emac_adapter *adpt);
+void emac_mac_down(struct emac_adapter *adpt);
+void emac_mac_reset(struct emac_adapter *adpt);
+void emac_mac_start(struct emac_adapter *adpt);
+void emac_mac_stop(struct emac_adapter *adpt);
+void emac_mac_mode_config(struct emac_adapter *adpt);
+void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
+ int *num_pkts, int max_pkts);
+int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
+ struct sk_buff *skb);
+void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
+void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
+ struct emac_adapter *adpt);
+int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt);
+void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt);
+void emac_mac_multicast_addr_clear(struct emac_adapter *adpt);
+void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr);
+
+#endif /*_EMAC_HW_H_*/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
new file mode 100644
index 000000000000..da4e90db4d98
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -0,0 +1,227 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+#include <linux/acpi.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-phy.h"
+#include "emac-sgmii.h"
+
+/* EMAC base register offsets */
+#define EMAC_MDIO_CTRL 0x001414
+#define EMAC_PHY_STS 0x001418
+#define EMAC_MDIO_EX_CTRL 0x001440
+
+/* EMAC_MDIO_CTRL */
+#define MDIO_MODE BIT(30)
+#define MDIO_PR BIT(29)
+#define MDIO_AP_EN BIT(28)
+#define MDIO_BUSY BIT(27)
+#define MDIO_CLK_SEL_BMSK 0x7000000
+#define MDIO_CLK_SEL_SHFT 24
+#define MDIO_START BIT(23)
+#define SUP_PREAMBLE BIT(22)
+#define MDIO_RD_NWR BIT(21)
+#define MDIO_REG_ADDR_BMSK 0x1f0000
+#define MDIO_REG_ADDR_SHFT 16
+#define MDIO_DATA_BMSK 0xffff
+#define MDIO_DATA_SHFT 0
+
+/* EMAC_PHY_STS */
+#define PHY_ADDR_BMSK 0x1f0000
+#define PHY_ADDR_SHFT 16
+
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_28 7
+
+#define MDIO_WAIT_TIMES 1000
+
+#define EMAC_LINK_SPEED_DEFAULT (\
+ EMAC_LINK_SPEED_10_HALF |\
+ EMAC_LINK_SPEED_10_FULL |\
+ EMAC_LINK_SPEED_100_HALF |\
+ EMAC_LINK_SPEED_100_FULL |\
+ EMAC_LINK_SPEED_1GB_FULL)
+
+/**
+ * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
+ * @adpt: the emac adapter
+ *
+ * The autopoll feature takes over the MDIO bus. In order for
+ * the PHY driver to be able to talk to the PHY over the MDIO
+ * bus, we need to temporarily disable the autopoll feature.
+ */
+static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
+{
+ u32 val;
+
+ /* disable autopoll */
+ emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
+
+ /* wait for any mdio polling to complete */
+ if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
+ !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
+ return 0;
+
+ /* failed to disable; ensure it is enabled before returning */
+ emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
+
+ return -EBUSY;
+}
+
+/**
+ * emac_phy_mdio_autopoll_enable() - enable mdio autopoll
+ * @adpt: the emac adapter
+ *
+ * The EMAC has the ability to poll the external PHY on the MDIO
+ * bus for link state changes. This eliminates the need for the
+ * driver to poll the PHY. If the link state does change,
+ * the EMAC issues an interrupt on behalf of the PHY.
+ */
+static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
+{
+ emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
+}
+
+static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct emac_adapter *adpt = bus->priv;
+ u32 reg;
+ int ret;
+
+ ret = emac_phy_mdio_autopoll_disable(adpt);
+ if (ret)
+ return ret;
+
+ emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ (addr << PHY_ADDR_SHFT));
+
+ reg = SUP_PREAMBLE |
+ ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
+ ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
+ MDIO_START | MDIO_RD_NWR;
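+ /* Illustrative example: reading MII register 1 (BMSR) sets reg to
+ * SUP_PREAMBLE | the clock selection | (1 << MDIO_REG_ADDR_SHFT) |
+ * MDIO_START | MDIO_RD_NWR; the result is read back from
+ * MDIO_DATA_BMSK once MDIO_START and MDIO_BUSY clear.
+ */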
+
+ writel(reg, adpt->base + EMAC_MDIO_CTRL);
+
+ if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+ !(reg & (MDIO_START | MDIO_BUSY)),
+ 100, MDIO_WAIT_TIMES * 100))
+ ret = -EIO;
+ else
+ ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+
+ emac_phy_mdio_autopoll_enable(adpt);
+
+ return ret;
+}
+
+static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct emac_adapter *adpt = bus->priv;
+ u32 reg;
+ int ret;
+
+ ret = emac_phy_mdio_autopoll_disable(adpt);
+ if (ret)
+ return ret;
+
+ emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ (addr << PHY_ADDR_SHFT));
+
+ reg = SUP_PREAMBLE |
+ ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
+ ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
+ ((val << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) |
+ MDIO_START;
+
+ writel(reg, adpt->base + EMAC_MDIO_CTRL);
+
+ if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+ !(reg & (MDIO_START | MDIO_BUSY)), 100,
+ MDIO_WAIT_TIMES * 100))
+ ret = -EIO;
+
+ emac_phy_mdio_autopoll_enable(adpt);
+
+ return ret;
+}
+
+/* Configure the MDIO bus and connect the external PHY */
+int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *mii_bus;
+ int ret;
+
+ /* Create the mii_bus object for talking to the MDIO bus */
+ adpt->mii_bus = mii_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "emac-mdio";
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+ mii_bus->read = emac_mdio_read;
+ mii_bus->write = emac_mdio_write;
+ mii_bus->parent = &pdev->dev;
+ mii_bus->priv = adpt;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ u32 phy_addr;
+
+ ret = mdiobus_register(mii_bus);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register mdio bus\n");
+ return ret;
+ }
+ ret = device_property_read_u32(&pdev->dev, "phy-channel",
+ &phy_addr);
+ if (ret)
+ /* If we can't read a valid phy address, then assume
+ * that there is only one phy on this mdio bus.
+ */
+ adpt->phydev = phy_find_first(mii_bus);
+ else
+ adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+
+ } else {
+ struct device_node *phy_np;
+
+ ret = of_mdiobus_register(mii_bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register mdio bus\n");
+ return ret;
+ }
+
+ phy_np = of_parse_phandle(np, "phy-handle", 0);
+ adpt->phydev = of_phy_find_device(phy_np);
+ }
+
+ if (!adpt->phydev) {
+ dev_err(&pdev->dev, "could not find external phy\n");
+ mdiobus_unregister(mii_bus);
+ return -ENODEV;
+ }
+
+ if (adpt->phydev->drv)
+ phy_attached_print(adpt->phydev, NULL);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
new file mode 100644
index 000000000000..49f3701a6dd7
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _EMAC_PHY_H_
+#define _EMAC_PHY_H_
+
+typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt);
+
+/**
+ * struct emac_phy - internal emac phy
+ * @base: base address
+ * @digital: per-lane digital block
+ * @initialize: initialization function
+ */
+struct emac_phy {
+ void __iomem *base;
+ void __iomem *digital;
+ emac_sgmii_initialize initialize;
+};
+
+struct emac_adapter;
+
+int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt);
+
+#endif /* _EMAC_PHY_H_ */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
new file mode 100644
index 000000000000..75c1b530e39e
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/acpi.h>
+#include <linux/of_device.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-sgmii.h"
+
+/* EMAC_QSERDES register offsets */
+#define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x000000
+#define EMAC_QSERDES_COM_PLL_CNTRL 0x000014
+#define EMAC_QSERDES_COM_PLL_IP_SETI 0x000018
+#define EMAC_QSERDES_COM_PLL_CP_SETI 0x000024
+#define EMAC_QSERDES_COM_PLL_IP_SETP 0x000028
+#define EMAC_QSERDES_COM_PLL_CP_SETP 0x00002c
+#define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x000038
+#define EMAC_QSERDES_COM_RESETSM_CNTRL 0x000040
+#define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x000044
+#define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x000048
+#define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x00004c
+#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x000050
+#define EMAC_QSERDES_COM_DEC_START1 0x000064
+#define EMAC_QSERDES_COM_DIV_FRAC_START1 0x000098
+#define EMAC_QSERDES_COM_DIV_FRAC_START2 0x00009c
+#define EMAC_QSERDES_COM_DIV_FRAC_START3 0x0000a0
+#define EMAC_QSERDES_COM_DEC_START2 0x0000a4
+#define EMAC_QSERDES_COM_PLL_CRCTRL 0x0000ac
+#define EMAC_QSERDES_COM_RESET_SM 0x0000bc
+#define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x000100
+#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x000108
+#define EMAC_QSERDES_TX_TX_DRV_LVL 0x00010c
+#define EMAC_QSERDES_TX_LANE_MODE 0x000150
+#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x000170
+#define EMAC_QSERDES_RX_CDR_CONTROL 0x000200
+#define EMAC_QSERDES_RX_CDR_CONTROL2 0x000210
+#define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x000230
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_SERDES_START 0x000000
+#define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x000004
+#define EMAC_SGMII_PHY_RX_PWR_CTRL 0x000008
+#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x00000C
+#define EMAC_SGMII_PHY_LANE_CTRL1 0x000018
+#define EMAC_SGMII_PHY_AUTONEG_CFG2 0x000048
+#define EMAC_SGMII_PHY_CDR_CTRL0 0x000058
+#define EMAC_SGMII_PHY_SPEED_CFG1 0x000074
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x000080
+#define EMAC_SGMII_PHY_RESET_CTRL 0x0000a8
+#define EMAC_SGMII_PHY_IRQ_CMD 0x0000ac
+#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x0000b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x0000b4
+#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x0000b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x0000d4
+#define EMAC_SGMII_PHY_AUTONEG0_STATUS 0x0000e0
+#define EMAC_SGMII_PHY_AUTONEG1_STATUS 0x0000e4
+
+/* EMAC_QSERDES_COM_PLL_IP_SETI */
+#define PLL_IPSETI(x) ((x) & 0x3f)
+
+/* EMAC_QSERDES_COM_PLL_CP_SETI */
+#define PLL_CPSETI(x) ((x) & 0xff)
+
+/* EMAC_QSERDES_COM_PLL_IP_SETP */
+#define PLL_IPSETP(x) ((x) & 0x3f)
+
+/* EMAC_QSERDES_COM_PLL_CP_SETP */
+#define PLL_CPSETP(x) ((x) & 0x1f)
+
+/* EMAC_QSERDES_COM_PLL_CRCTRL */
+#define PLL_RCTRL(x) (((x) & 0xf) << 4)
+#define PLL_CCTRL(x) ((x) & 0xf)
+
+/* SGMII v2 PHY registers per lane */
+#define EMAC_SGMII_PHY_LN_OFFSET 0x0400
+
+/* SGMII v2 digital lane registers */
+#define EMAC_SGMII_LN_DRVR_CTRL0 0x00C
+#define EMAC_SGMII_LN_DRVR_TAP_EN 0x018
+#define EMAC_SGMII_LN_TX_MARGINING 0x01C
+#define EMAC_SGMII_LN_TX_PRE 0x020
+#define EMAC_SGMII_LN_TX_POST 0x024
+#define EMAC_SGMII_LN_TX_BAND_MODE 0x060
+#define EMAC_SGMII_LN_LANE_MODE 0x064
+#define EMAC_SGMII_LN_PARALLEL_RATE 0x078
+#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x0B8
+#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x0D0
+#define EMAC_SGMII_LN_VGA_INITVAL 0x134
+#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x17C
+#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x188
+#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x194
+#define EMAC_SGMII_LN_RX_BAND 0x19C
+#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x1B8
+#define EMAC_SGMII_LN_RSM_CONFIG 0x1F0
+#define EMAC_SGMII_LN_SIGDET_ENABLES 0x224
+#define EMAC_SGMII_LN_SIGDET_CNTRL 0x228
+#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x22C
+#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x2A0
+#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x2AC
+#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x2BC
+
+/* SGMII v2 digital lane register values */
+#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
+#define UCDR_xO_GAIN_MODE(x) ((x) & 0x7f)
+#define UCDR_ENABLE BIT(6)
+#define UCDR_SO_SATURATION(x) ((x) & 0x3f)
+#define SIGDET_LP_BYP_PS4 BIT(7)
+#define SIGDET_EN_PS0_TO_PS2 BIT(6)
+#define EN_ACCOUPLEVCM_SW_MUX BIT(5)
+#define EN_ACCOUPLEVCM_SW BIT(4)
+#define RX_SYNC_EN BIT(3)
+#define RXTERM_HIGHZ_PS5 BIT(2)
+#define SIGDET_EN_PS3 BIT(1)
+#define EN_ACCOUPLE_VCM_PS3 BIT(0)
+#define UFS_MODE BIT(5)
+#define TXVAL_VALID_INIT BIT(4)
+#define TXVAL_VALID_MUX BIT(3)
+#define TXVAL_VALID BIT(2)
+#define USB3P1_MODE BIT(1)
+#define KR_PCIGEN3_MODE BIT(0)
+#define PRE_EN BIT(3)
+#define POST_EN BIT(2)
+#define MAIN_EN_MUX BIT(1)
+#define MAIN_EN BIT(0)
+#define TX_MARGINING_MUX BIT(6)
+#define TX_MARGINING(x) ((x) & 0x3f)
+#define TX_PRE_MUX BIT(6)
+#define TX_PRE(x) ((x) & 0x3f)
+#define TX_POST_MUX BIT(6)
+#define TX_POST(x) ((x) & 0x3f)
+#define CML_GEAR_MODE(x) (((x) & 7) << 3)
+#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
+#define MIXER_DATARATE_MODE(x) ((x) & 3)
+#define VGA_THRESH_DFE(x) ((x) & 0x3f)
+#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5)
+#define SIGDET_LP_BYP_MUX BIT(4)
+#define SIGDET_LP_BYP BIT(3)
+#define SIGDET_EN_MUX BIT(2)
+#define SIGDET_EN BIT(1)
+#define SIGDET_FLT_BYP BIT(0)
+#define SIGDET_LVL(x) (((x) & 0xf) << 4)
+#define SIGDET_BW_CTRL(x) ((x) & 0xf)
+#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1)
+#define SIGDET_DEGLITCH_BYP BIT(0)
+#define INVERT_PCS_RX_CLK BIT(7)
+#define PWM_EN BIT(6)
+#define RXBIAS_SEL(x) (((x) & 0x3) << 4)
+#define EBDAC_SIGN BIT(3)
+#define EDAC_SIGN BIT(2)
+#define EN_AUXTAP1SIGN_INVERT BIT(1)
+#define EN_DAC_CHOPPING BIT(0)
+#define DRVR_LOGIC_CLK_EN BIT(4)
+#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf)
+#define PARALLEL_RATE_MODE2(x) (((x) & 0x3) << 4)
+#define PARALLEL_RATE_MODE1(x) (((x) & 0x3) << 2)
+#define PARALLEL_RATE_MODE0(x) ((x) & 0x3)
+#define BAND_MODE2(x) (((x) & 0x3) << 4)
+#define BAND_MODE1(x) (((x) & 0x3) << 2)
+#define BAND_MODE0(x) ((x) & 0x3)
+#define LANE_SYNC_MODE BIT(5)
+#define LANE_MODE(x) ((x) & 0x1f)
+#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5)
+#define EN_DLL_MODE0 BIT(4)
+#define EN_IQ_DCC_MODE0 BIT(3)
+#define EN_IQCAL_MODE0 BIT(2)
+#define EN_QPATH_MODE0 BIT(1)
+#define EN_EPATH_MODE0 BIT(0)
+#define FORCE_TSYNC_ACK BIT(7)
+#define FORCE_CMN_ACK BIT(6)
+#define FORCE_CMN_READY BIT(5)
+#define EN_RCLK_DEGLITCH BIT(4)
+#define BYPASS_RSM_CDR_RESET BIT(3)
+#define BYPASS_RSM_TSYNC BIT(2)
+#define BYPASS_RSM_SAMP_CAL BIT(1)
+#define BYPASS_RSM_DLL_CAL BIT(0)
+
+/* EMAC_QSERDES_COM_SYS_CLK_CTRL */
+#define SYSCLK_CM BIT(4)
+#define SYSCLK_AC_COUPLE BIT(3)
+
+/* EMAC_QSERDES_COM_PLL_CNTRL */
+#define OCP_EN BIT(5)
+#define PLL_DIV_FFEN BIT(2)
+#define PLL_DIV_ORD BIT(1)
+
+/* EMAC_QSERDES_COM_SYSCLK_EN_SEL */
+#define SYSCLK_SEL_CMOS BIT(3)
+
+/* EMAC_QSERDES_COM_RESETSM_CNTRL */
+#define FRQ_TUNE_MODE BIT(4)
+
+/* EMAC_QSERDES_COM_PLLLOCK_CMP_EN */
+#define PLLLOCK_CMP_EN BIT(0)
+
+/* EMAC_QSERDES_COM_DEC_START1 */
+#define DEC_START1_MUX BIT(7)
+#define DEC_START1(x) ((x) & 0x7f)
+
+/* EMAC_QSERDES_COM_DIV_FRAC_START1 & EMAC_QSERDES_COM_DIV_FRAC_START2 */
+#define DIV_FRAC_START_MUX BIT(7)
+#define DIV_FRAC_START(x) ((x) & 0x7f)
+
+/* EMAC_QSERDES_COM_DIV_FRAC_START3 */
+#define DIV_FRAC_START3_MUX BIT(4)
+#define DIV_FRAC_START3(x) ((x) & 0xf)
+
+/* EMAC_QSERDES_COM_DEC_START2 */
+#define DEC_START2_MUX BIT(1)
+#define DEC_START2 BIT(0)
+
+/* EMAC_QSERDES_COM_RESET_SM */
+#define READY BIT(5)
+
+/* EMAC_QSERDES_TX_TX_EMP_POST1_LVL */
+#define TX_EMP_POST1_LVL_MUX BIT(5)
+#define TX_EMP_POST1_LVL(x) ((x) & 0x1f)
+#define TX_EMP_POST1_LVL_BMSK 0x1f
+#define TX_EMP_POST1_LVL_SHFT 0
+
+/* EMAC_QSERDES_TX_TX_DRV_LVL */
+#define TX_DRV_LVL_MUX BIT(4)
+#define TX_DRV_LVL(x) ((x) & 0xf)
+
+/* EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN */
+#define EMP_EN_MUX BIT(1)
+#define EMP_EN BIT(0)
+
+/* EMAC_QSERDES_RX_CDR_CONTROL & EMAC_QSERDES_RX_CDR_CONTROL2 */
+#define HBW_PD_EN BIT(7)
+#define SECONDORDERENABLE BIT(6)
+#define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3)
+#define SECONDORDERGAIN(x) ((x) & 0x7)
+
+/* EMAC_QSERDES_RX_RX_EQ_GAIN12 */
+#define RX_EQ_GAIN2(x) (((x) & 0xf) << 4)
+#define RX_EQ_GAIN1(x) ((x) & 0xf)
+
+/* EMAC_SGMII_PHY_SERDES_START */
+#define SERDES_START BIT(0)
+
+/* EMAC_SGMII_PHY_CMN_PWR_CTRL */
+#define BIAS_EN BIT(6)
+#define PLL_EN BIT(5)
+#define SYSCLK_EN BIT(4)
+#define CLKBUF_L_EN BIT(3)
+#define PLL_TXCLK_EN BIT(1)
+#define PLL_RXCLK_EN BIT(0)
+
+/* EMAC_SGMII_PHY_RX_PWR_CTRL */
+#define L0_RX_SIGDET_EN BIT(7)
+#define L0_RX_TERM_MODE(x) (((x) & 3) << 4)
+#define L0_RX_I_EN BIT(1)
+
+/* EMAC_SGMII_PHY_TX_PWR_CTRL */
+#define L0_TX_EN BIT(5)
+#define L0_CLKBUF_EN BIT(4)
+#define L0_TRAN_BIAS_EN BIT(1)
+
+/* EMAC_SGMII_PHY_LANE_CTRL1 */
+#define L0_RX_EQUALIZE_ENABLE BIT(6)
+#define L0_RESET_TSYNC_EN BIT(4)
+#define L0_DRV_LVL(x) ((x) & 0xf)
+
+/* EMAC_SGMII_PHY_AUTONEG_CFG2 */
+#define FORCE_AN_TX_CFG BIT(5)
+#define FORCE_AN_RX_CFG BIT(4)
+#define AN_ENABLE BIT(0)
+
+/* EMAC_SGMII_PHY_SPEED_CFG1 */
+#define DUPLEX_MODE BIT(4)
+#define SPDMODE_1000 BIT(1)
+#define SPDMODE_100 BIT(0)
+#define SPDMODE_10 0
+#define SPDMODE_BMSK 3
+#define SPDMODE_SHFT 0
+
+/* EMAC_SGMII_PHY_POW_DWN_CTRL0 */
+#define PWRDN_B BIT(0)
+#define CDR_MAX_CNT(x) ((x) & 0xff)
+
+/* EMAC_QSERDES_TX_BIST_MODE_LANENO */
+#define BIST_LANE_NUMBER(x) (((x) & 3) << 5)
+#define BISTMODE(x) ((x) & 0x1f)
+
+/* EMAC_QSERDES_COM_PLLLOCK_CMPx */
+#define PLLLOCK_CMP(x) ((x) & 0xff)
+
+/* EMAC_SGMII_PHY_RESET_CTRL */
+#define PHY_SW_RESET BIT(0)
+
+/* EMAC_SGMII_PHY_IRQ_CMD */
+#define IRQ_GLOBAL_CLEAR BIT(0)
+
+/* EMAC_SGMII_PHY_INTERRUPT_MASK */
+#define DECODE_CODE_ERR BIT(7)
+#define DECODE_DISP_ERR BIT(6)
+#define PLL_UNLOCK BIT(5)
+#define AN_ILLEGAL_TERM BIT(4)
+#define SYNC_FAIL BIT(3)
+#define AN_START BIT(2)
+#define AN_END BIT(1)
+#define AN_REQUEST BIT(0)
+
+#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10
+
+#define SGMII_PHY_INTERRUPT_ERR (\
+ DECODE_CODE_ERR |\
+ DECODE_DISP_ERR)
+
+#define SGMII_ISR_AN_MASK (\
+ AN_REQUEST |\
+ AN_START |\
+ AN_END |\
+ AN_ILLEGAL_TERM |\
+ PLL_UNLOCK |\
+ SYNC_FAIL)
+
+#define SGMII_ISR_MASK (\
+ SGMII_PHY_INTERRUPT_ERR |\
+ SGMII_ISR_AN_MASK)
+
+/* SGMII TX_CONFIG */
+#define TXCFG_LINK 0x8000
+#define TXCFG_MODE_BMSK 0x1c00
+#define TXCFG_1000_FULL 0x1800
+#define TXCFG_100_FULL 0x1400
+#define TXCFG_100_HALF 0x0400
+#define TXCFG_10_FULL 0x1000
+#define TXCFG_10_HALF 0x0000
+
+#define SERDES_START_WAIT_TIMES 100
+
+struct emac_reg_write {
+ unsigned int offset;
+ u32 val;
+};
+
+static void emac_reg_write_all(void __iomem *base,
+ const struct emac_reg_write *itr, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++itr, ++i)
+ writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write physical_coding_sublayer_programming_v1[] = {
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CMN_PWR_CTRL,
+ BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN},
+ {EMAC_SGMII_PHY_RX_PWR_CTRL,
+ L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
+ {EMAC_SGMII_PHY_CMN_PWR_CTRL,
+ BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN |
+ PLL_RXCLK_EN},
+ {EMAC_SGMII_PHY_LANE_CTRL1,
+ L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN | L0_DRV_LVL(15)},
+};
+
+static const struct emac_reg_write sysclk_refclk_setting[] = {
+ {EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS},
+ {EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE},
+};
+
+static const struct emac_reg_write pll_setting[] = {
+ {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)},
+ {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
+ {EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
+ {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
+ {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
+ {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD},
+ {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
+ {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
+ {EMAC_QSERDES_COM_DIV_FRAC_START1,
+ DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
+ {EMAC_QSERDES_COM_DIV_FRAC_START2,
+ DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
+ {EMAC_QSERDES_COM_DIV_FRAC_START3,
+ DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
+ {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
+};
+
+static const struct emac_reg_write cdr_setting[] = {
+ {EMAC_QSERDES_RX_CDR_CONTROL,
+ SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)},
+ {EMAC_QSERDES_RX_CDR_CONTROL2,
+ SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)},
+};
+
+static const struct emac_reg_write tx_rx_setting[] = {
+ {EMAC_QSERDES_TX_BIST_MODE_LANENO, 0},
+ {EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)},
+ {EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN},
+ {EMAC_QSERDES_TX_TX_EMP_POST1_LVL,
+ TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)},
+ {EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)},
+ {EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)},
+};
+
+static const struct emac_reg_write sgmii_v2_laned[] = {
+ /* CDR Settings */
+ {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
+ UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
+ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)},
+ {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
+
+ /* TX/RX Settings */
+ {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
+
+ {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
+ {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
+ {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
+ {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
+ {EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
+
+ {EMAC_SGMII_LN_CML_CTRL_MODE0,
+ CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
+ {EMAC_SGMII_LN_MIXER_CTRL_MODE0,
+ MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
+ {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
+ {EMAC_SGMII_LN_SIGDET_ENABLES,
+ SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
+ {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
+
+ {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
+ {EMAC_SGMII_LN_RX_MISC_CNTRL0, 0},
+ {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
+ DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
+
+ {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
+ {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)},
+ {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
+ {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)},
+ {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
+};
+
+static const struct emac_reg_write physical_coding_sublayer_programming_v2[] = {
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
+ {EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
+};
+
+static int emac_sgmii_link_init(struct emac_adapter *adpt)
+{
+ struct phy_device *phydev = adpt->phydev;
+ struct emac_phy *phy = &adpt->phy;
+ u32 val;
+
+ val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
+ val |= AN_ENABLE;
+ writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+ } else {
+ u32 speed_cfg;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed_cfg = SPDMODE_10;
+ break;
+ case SPEED_100:
+ speed_cfg = SPDMODE_100;
+ break;
+ case SPEED_1000:
+ speed_cfg = SPDMODE_1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL)
+ speed_cfg |= DUPLEX_MODE;
+
+ val &= ~AN_ENABLE;
+ writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1);
+ writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+ }
+
+ return 0;
+}
+
+static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
+{
+ struct emac_phy *phy = &adpt->phy;
+ u32 status;
+
+ writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
+ writel_relaxed(IRQ_GLOBAL_CLEAR, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
+ /* Ensure interrupt clear command is written to HW */
+ wmb();
+
+ /* After setting the IRQ_GLOBAL_CLEAR bit, the status clearing must
+ * be confirmed before clearing the bits in other registers.
+ * It takes a few cycles for hw to clear the interrupt status.
+ */
+ if (readl_poll_timeout_atomic(phy->base +
+ EMAC_SGMII_PHY_INTERRUPT_STATUS,
+ status, !(status & irq_bits), 1,
+ SGMII_PHY_IRQ_CLR_WAIT_TIME)) {
+ netdev_err(adpt->netdev,
+ "error: failed clear SGMII irq: status:0x%x bits:0x%x\n",
+ status, irq_bits);
+ return -EIO;
+ }
+
+ /* Finalize clearing procedure */
+ writel_relaxed(0, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
+ writel_relaxed(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
+
+ /* Ensure that clearing procedure finalization is written to HW */
+ wmb();
+
+ return 0;
+}
+
+int emac_sgmii_init_v1(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ unsigned int i;
+ int ret;
+
+ ret = emac_sgmii_link_init(adpt);
+ if (ret)
+ return ret;
+
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v1,
+ ARRAY_SIZE(physical_coding_sublayer_programming_v1));
+ emac_reg_write_all(phy->base, sysclk_refclk_setting,
+ ARRAY_SIZE(sysclk_refclk_setting));
+ emac_reg_write_all(phy->base, pll_setting, ARRAY_SIZE(pll_setting));
+ emac_reg_write_all(phy->base, cdr_setting, ARRAY_SIZE(cdr_setting));
+ emac_reg_write_all(phy->base, tx_rx_setting,
+ ARRAY_SIZE(tx_rx_setting));
+
+ /* Power up the Ser/Des engine */
+ writel(SERDES_START, phy->base + EMAC_SGMII_PHY_SERDES_START);
+
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ if (readl(phy->base + EMAC_QSERDES_COM_RESET_SM) & READY)
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "error: ser/des failed to start\n");
+ return -EIO;
+ }
+	/* Mask out all the SGMII interrupts */
+ writel(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+
+ return 0;
+}
+
+int emac_sgmii_init_v2(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ void __iomem *phy_regs = phy->base;
+ void __iomem *laned = phy->digital;
+ unsigned int i;
+ u32 lnstatus;
+ int ret;
+
+ ret = emac_sgmii_link_init(adpt);
+ if (ret)
+ return ret;
+
+ /* PCS lane-x init */
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v2,
+ ARRAY_SIZE(physical_coding_sublayer_programming_v2));
+
+ /* SGMII lane-x init */
+ emac_reg_write_all(phy->digital,
+ sgmii_v2_laned, ARRAY_SIZE(sgmii_v2_laned));
+
+ /* Power up PCS and start reset lane state machine */
+
+ writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
+ writel(1, laned + SGMII_LN_RSM_START);
+
+ /* Wait for c_ready assertion */
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
+ if (lnstatus & BIT(1))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "SGMII failed to start\n");
+ return -EIO;
+ }
+
+ /* Disable digital and SERDES loopback */
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
+ writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
+
+	/* Mask out all the SGMII interrupts */
+ writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+
+ return 0;
+}
+
+static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ u32 val;
+
+ /* Reset PHY */
+ val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2);
+ writel(((val & ~PHY_RESET) | PHY_RESET), phy->base +
+ EMAC_EMAC_WRAPPER_CSR2);
+ /* Ensure phy-reset command is written to HW before the release cmd */
+ msleep(50);
+ val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2);
+ writel((val & ~PHY_RESET), phy->base + EMAC_EMAC_WRAPPER_CSR2);
+ /* Ensure phy-reset release command is written to HW before initializing
+ * SGMII
+ */
+ msleep(50);
+}
+
+void emac_sgmii_reset(struct emac_adapter *adpt)
+{
+ int ret;
+
+ clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
+ emac_sgmii_reset_prepare(adpt);
+
+ ret = adpt->phy.initialize(adpt);
+ if (ret)
+ netdev_err(adpt->netdev,
+ "could not reinitialize internal PHY (error=%i)\n",
+ ret);
+
+ clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
+}
+
+static int emac_sgmii_acpi_match(struct device *dev, void *data)
+{
+ static const struct acpi_device_id match_table[] = {
+ {
+ .id = "QCOM8071",
+ .driver_data = (kernel_ulong_t)emac_sgmii_init_v2,
+ },
+ {}
+ };
+ const struct acpi_device_id *id = acpi_match_device(match_table, dev);
+ emac_sgmii_initialize *initialize = data;
+
+ if (id)
+ *initialize = (emac_sgmii_initialize)id->driver_data;
+
+ return !!id;
+}
+
+static const struct of_device_id emac_sgmii_dt_match[] = {
+ {
+ .compatible = "qcom,fsm9900-emac-sgmii",
+ .data = emac_sgmii_init_v1,
+ },
+ {
+ .compatible = "qcom,qdf2432-emac-sgmii",
+ .data = emac_sgmii_init_v2,
+ },
+ {}
+};
+
+int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
+{
+ struct platform_device *sgmii_pdev = NULL;
+ struct emac_phy *phy = &adpt->phy;
+ struct resource *res;
+ int ret;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ struct device *dev;
+
+ dev = device_find_child(&pdev->dev, &phy->initialize,
+ emac_sgmii_acpi_match);
+
+ if (!dev) {
+ dev_err(&pdev->dev, "cannot find internal phy node\n");
+ return -ENODEV;
+ }
+
+ sgmii_pdev = to_platform_device(dev);
+ } else {
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "missing internal-phy property\n");
+ return -ENODEV;
+ }
+
+ sgmii_pdev = of_find_device_by_node(np);
+ if (!sgmii_pdev) {
+ dev_err(&pdev->dev, "invalid internal-phy property\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "unrecognized internal phy node\n");
+ ret = -ENODEV;
+ goto error_put_device;
+ }
+
+ phy->initialize = (emac_sgmii_initialize)match->data;
+ }
+
+ /* Base address is the first address */
+ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto error_put_device;
+ }
+
+ phy->base = ioremap(res->start, resource_size(res));
+ if (!phy->base) {
+ ret = -ENOMEM;
+ goto error_put_device;
+ }
+
+	/* v2 SGMII has a per-lane digital block, so map it if it exists */
+ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ phy->digital = ioremap(res->start, resource_size(res));
+ if (!phy->digital) {
+ ret = -ENOMEM;
+ goto error_unmap_base;
+ }
+ }
+
+ ret = phy->initialize(adpt);
+ if (ret)
+ goto error;
+
+ /* We've remapped the addresses, so we don't need the device any
+ * more. of_find_device_by_node() says we should release it.
+ */
+ put_device(&sgmii_pdev->dev);
+
+ return 0;
+
+error:
+ if (phy->digital)
+ iounmap(phy->digital);
+error_unmap_base:
+ iounmap(phy->base);
+error_put_device:
+ put_device(&sgmii_pdev->dev);
+
+ return ret;
+}
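+/* Resource layout assumed by the function above (per the DT/ACPI
+ * description elsewhere in this patch): MEM resource 0 is the SGMII PHY
+ * register block (phy->base, required); MEM resource 1 is the v2
+ * per-lane digital block (phy->digital, optional, present only on v2
+ * hardware).
+ */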
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
new file mode 100644
index 000000000000..ce79212ff403
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EMAC_SGMII_H_
+#define _EMAC_SGMII_H_
+
+struct emac_adapter;
+struct platform_device;
+
+int emac_sgmii_init_v1(struct emac_adapter *adpt);
+int emac_sgmii_init_v2(struct emac_adapter *adpt);
+int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
+void emac_sgmii_reset(struct emac_adapter *adpt);
+
+#endif
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
new file mode 100644
index 000000000000..9bf3b2b82e95
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -0,0 +1,755 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-phy.h"
+#include "emac-sgmii.h"
+
+#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define EMAC_RRD_SIZE 4
+/* The RRD size if timestamping is enabled: */
+#define EMAC_TS_RRD_SIZE 6
+#define EMAC_TPD_SIZE 4
+#define EMAC_RFD_SIZE 2
+
+#define REG_MAC_RX_STATUS_BIN EMAC_RXMAC_STATC_REG0
+#define REG_MAC_RX_STATUS_END EMAC_RXMAC_STATC_REG22
+#define REG_MAC_TX_STATUS_BIN EMAC_TXMAC_STATC_REG0
+#define REG_MAC_TX_STATUS_END EMAC_TXMAC_STATC_REG24
+
+#define RXQ0_NUM_RFD_PREF_DEF 8
+#define TXQ0_NUM_TPD_PREF_DEF 5
+
+#define EMAC_PREAMBLE_DEF 7
+
+#define DMAR_DLY_CNT_DEF 15
+#define DMAW_DLY_CNT_DEF 4
+
+#define IMR_NORMAL_MASK (\
+ ISR_ERROR |\
+ ISR_GPHY_LINK |\
+ ISR_TX_PKT |\
+ GPHY_WAKEUP_INT)
+
+#define IMR_EXTENDED_MASK (\
+ SW_MAN_INT |\
+ ISR_OVER |\
+ ISR_ERROR |\
+ ISR_GPHY_LINK |\
+ ISR_TX_PKT |\
+ GPHY_WAKEUP_INT)
+
+#define ISR_TX_PKT (\
+ TX_PKT_INT |\
+ TX_PKT_INT1 |\
+ TX_PKT_INT2 |\
+ TX_PKT_INT3)
+
+#define ISR_GPHY_LINK (\
+ GPHY_LINK_UP_INT |\
+ GPHY_LINK_DOWN_INT)
+
+#define ISR_OVER (\
+ RFD0_UR_INT |\
+ RFD1_UR_INT |\
+ RFD2_UR_INT |\
+ RFD3_UR_INT |\
+ RFD4_UR_INT |\
+ RXF_OF_INT |\
+ TXF_UR_INT)
+
+#define ISR_ERROR (\
+ DMAR_TO_INT |\
+ DMAW_TO_INT |\
+ TXQ_TO_INT)
+
+/* in sync with enum emac_clk_id */
+static const char * const emac_clk_name[] = {
+ "axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
+ "rx_clk", "sys_clk"
+};
+
+void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
+{
+ u32 data = readl(addr);
+
+ writel(((data & ~mask) | val), addr);
+}
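+/* Illustrative usage of the helper above (hypothetical call site, built
+ * from names defined elsewhere in this patch): update only the field
+ * selected by the mask, leaving the rest of the register intact:
+ *
+ *	emac_reg_update32(phy->base + EMAC_EMAC_WRAPPER_CSR2,
+ *			  PHY_RESET, PHY_RESET);
+ *
+ * Note that val is OR-ed in unshifted, so callers pass field values
+ * already aligned to the mask position.
+ */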
+
+/* reinitialize */
+int emac_reinit_locked(struct emac_adapter *adpt)
+{
+ int ret;
+
+ mutex_lock(&adpt->reset_lock);
+
+ emac_mac_down(adpt);
+ emac_sgmii_reset(adpt);
+ ret = emac_mac_up(adpt);
+
+ mutex_unlock(&adpt->reset_lock);
+
+ return ret;
+}
+
+/* NAPI */
+static int emac_napi_rtx(struct napi_struct *napi, int budget)
+{
+ struct emac_rx_queue *rx_q =
+ container_of(napi, struct emac_rx_queue, napi);
+ struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
+ struct emac_irq *irq = rx_q->irq;
+ int work_done = 0;
+
+ emac_mac_rx_process(adpt, rx_q, &work_done, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ irq->mask |= rx_q->intr;
+ writel(irq->mask, adpt->base + EMAC_INT_MASK);
+ }
+
+ return work_done;
+}
+
+/* Transmit the packet */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
+}
+
+irqreturn_t emac_isr(int _irq, void *data)
+{
+ struct emac_irq *irq = data;
+ struct emac_adapter *adpt =
+ container_of(irq, struct emac_adapter, irq);
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+ u32 isr, status;
+
+ /* disable the interrupt */
+ writel(0, adpt->base + EMAC_INT_MASK);
+
+ isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);
+
+ status = isr & irq->mask;
+ if (status == 0)
+ goto exit;
+
+ if (status & ISR_ERROR) {
+ netif_warn(adpt, intr, adpt->netdev,
+ "warning: error irq status 0x%lx\n",
+ status & ISR_ERROR);
+ /* reset MAC */
+ schedule_work(&adpt->work_thread);
+ }
+
+	/* Schedule NAPI for any receive queue whose interrupt
+	 * status bit is set
+ */
+ if (status & rx_q->intr) {
+ if (napi_schedule_prep(&rx_q->napi)) {
+ irq->mask &= ~rx_q->intr;
+ __napi_schedule(&rx_q->napi);
+ }
+ }
+
+ if (status & TX_PKT_INT)
+ emac_mac_tx_process(adpt, &adpt->tx_q);
+
+ if (status & ISR_OVER)
+ net_warn_ratelimited("warning: TX/RX overflow\n");
+
+ /* link event */
+ if (status & ISR_GPHY_LINK)
+ phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
+
+exit:
+ /* enable the interrupt */
+ writel(irq->mask, adpt->base + EMAC_INT_MASK);
+
+ return IRQ_HANDLED;
+}
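+/* The ISR above masks all EMAC interrupts on entry and restores
+ * irq->mask on exit; when NAPI is scheduled, the RX bit is first removed
+ * from irq->mask and only re-added by emac_napi_rtx() once the poll
+ * completes, which is what keeps RX interrupts off while NAPI runs.
+ */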
+
+/* Configure VLAN tag strip/insert feature */
+static int emac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ /* We only need to reprogram the hardware if the VLAN tag features
+ * have changed, and if it's already running.
+ */
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
+ return 0;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ /* emac_mac_mode_config() uses netdev->features to configure the EMAC,
+ * so make sure it's set first.
+ */
+ netdev->features = features;
+
+ return emac_reinit_locked(adpt);
+}
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_rx_mode_set(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+
+ emac_mac_mode_config(adpt);
+
+ /* update multicast address filtering */
+ emac_mac_multicast_addr_clear(adpt);
+ netdev_for_each_mc_addr(ha, netdev)
+ emac_mac_multicast_addr_set(adpt, ha->addr);
+}
+
+/* Change the Maximum Transfer Unit (MTU) */
+static int emac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) ||
+ (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) {
+ netdev_err(adpt->netdev, "error: invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ netif_info(adpt, hw, adpt->netdev,
+ "changing MTU from %d to %d\n", netdev->mtu,
+ new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ return emac_reinit_locked(adpt);
+
+ return 0;
+}
+
+/* Called when the network interface is made active */
+static int emac_open(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ int ret;
+
+ /* allocate rx/tx dma buffer & descriptors */
+ ret = emac_mac_rx_tx_rings_alloc_all(adpt);
+ if (ret) {
+ netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
+ return ret;
+ }
+
+ ret = emac_mac_up(adpt);
+ if (ret) {
+ emac_mac_rx_tx_rings_free_all(adpt);
+ return ret;
+ }
+
+ emac_mac_start(adpt);
+
+ return 0;
+}
+
+/* Called when the network interface is disabled */
+static int emac_close(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ mutex_lock(&adpt->reset_lock);
+
+ emac_mac_down(adpt);
+ emac_mac_rx_tx_rings_free_all(adpt);
+
+ mutex_unlock(&adpt->reset_lock);
+
+ return 0;
+}
+
+/* Respond to a TX hang */
+static void emac_tx_timeout(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ schedule_work(&adpt->work_thread);
+}
+
+/* IOCTL support for the interface */
+static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!netdev->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+/* Provide network statistics info for the interface */
+static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ unsigned int addr = REG_MAC_RX_STATUS_BIN;
+ struct emac_stats *stats = &adpt->stats;
+ u64 *stats_itr = &adpt->stats.rx_ok;
+ u32 val;
+
+ spin_lock(&stats->lock);
+
+ while (addr <= REG_MAC_RX_STATUS_END) {
+ val = readl_relaxed(adpt->base + addr);
+ *stats_itr += val;
+ stats_itr++;
+ addr += sizeof(u32);
+ }
+
+ /* additional rx status */
+ val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23);
+ adpt->stats.rx_crc_align += val;
+ val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24);
+ adpt->stats.rx_jabbers += val;
+
+ /* update tx status */
+ addr = REG_MAC_TX_STATUS_BIN;
+ stats_itr = &adpt->stats.tx_ok;
+
+ while (addr <= REG_MAC_TX_STATUS_END) {
+ val = readl_relaxed(adpt->base + addr);
+ *stats_itr += val;
+ ++stats_itr;
+ addr += sizeof(u32);
+ }
+
+ /* additional tx status */
+ val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25);
+ adpt->stats.tx_col += val;
+
+ /* return parsed statistics */
+ net_stats->rx_packets = stats->rx_ok;
+ net_stats->tx_packets = stats->tx_ok;
+ net_stats->rx_bytes = stats->rx_byte_cnt;
+ net_stats->tx_bytes = stats->tx_byte_cnt;
+ net_stats->multicast = stats->rx_mcast;
+ net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
+ stats->tx_late_col + stats->tx_abort_col;
+
+ net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
+ stats->rx_len_err + stats->rx_sz_ov +
+ stats->rx_align_err;
+ net_stats->rx_fifo_errors = stats->rx_rxf_ov;
+ net_stats->rx_length_errors = stats->rx_len_err;
+ net_stats->rx_crc_errors = stats->rx_fcs_err;
+ net_stats->rx_frame_errors = stats->rx_align_err;
+ net_stats->rx_over_errors = stats->rx_rxf_ov;
+ net_stats->rx_missed_errors = stats->rx_rxf_ov;
+
+ net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
+ stats->tx_underrun + stats->tx_trunc;
+ net_stats->tx_fifo_errors = stats->tx_underrun;
+ net_stats->tx_aborted_errors = stats->tx_abort_col;
+ net_stats->tx_window_errors = stats->tx_late_col;
+
+ spin_unlock(&stats->lock);
+
+ return net_stats;
+}
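+/* The two accumulation loops above advance a u64 pointer in lock-step
+ * with consecutive 32-bit hardware counters, so they depend on struct
+ * emac_stats mirroring the register banks exactly: by this patch's
+ * definitions, REG_MAC_RX_STATUS_BIN..REG_MAC_RX_STATUS_END spans 23
+ * registers, matching the 23 members from rx_ok through rx_err_addr,
+ * and the TX bank spans 25 registers, matching tx_ok through
+ * tx_mcast_byte. Reordering the structure would silently misattribute
+ * counters.
+ */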
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = emac_change_mtu,
+ .ndo_do_ioctl = emac_ioctl,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_get_stats64 = emac_get_stats64,
+ .ndo_set_features = emac_set_features,
+ .ndo_set_rx_mode = emac_rx_mode_set,
+};
+
+/* Watchdog task routine, called to reinitialize the EMAC */
+static void emac_work_thread(struct work_struct *work)
+{
+ struct emac_adapter *adpt =
+ container_of(work, struct emac_adapter, work_thread);
+
+ emac_reinit_locked(adpt);
+}
+
+/* Initialize various data structures */
+static void emac_init_adapter(struct emac_adapter *adpt)
+{
+ u32 reg;
+
+ /* descriptors */
+ adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
+ adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;
+
+ /* dma */
+ adpt->dma_order = emac_dma_ord_out;
+ adpt->dmar_block = emac_dma_req_4096;
+ adpt->dmaw_block = emac_dma_req_128;
+ adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
+ adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
+ adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
+ adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;
+
+ /* irq moderator */
+ reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
+ ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
+ adpt->irq_mod = reg;
+
+ /* others */
+ adpt->preamble = EMAC_PREAMBLE_DEF;
+}
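+/* Worked example for the moderator encoding above, using the defaults
+ * from emac.h: EMAC_DEF_RX_IRQ_MOD = EMAC_DEF_TX_IRQ_MOD = 250, so each
+ * halved value is 125 (0x7d); with shifts of 16 and 0, the packed result
+ * is adpt->irq_mod = 0x007d007d, filling both 16-bit init fields of
+ * EMAC_IRQ_MOD_TIM_INIT.
+ */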
+
+/* Get the clocks */
+static int emac_clks_get(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMAC_CLK_CNT; i++) {
+ struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev,
+ "could not claim clock %s (error=%li)\n",
+ emac_clk_name[i], PTR_ERR(clk));
+
+ return PTR_ERR(clk);
+ }
+
+ adpt->clk[i] = clk;
+ }
+
+ return 0;
+}
+
+/* Initialize clocks */
+static int emac_clks_phase1_init(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ int ret;
+
+ ret = emac_clks_get(pdev, adpt);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+}
+
+/* Enable clocks; emac_clks_phase1_init() must be called first */
+static int emac_clks_phase2_init(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ int ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
+}
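+/* For reference: the 125 MHz programmed above is the standard byte clock
+ * for gigabit operation (1000 Mb/s across an 8-bit datapath), while
+ * 19.2 MHz, used during phase 1 and again in emac_sgmii_reset(), is a
+ * typical Qualcomm XO reference rate.
+ */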
+
+static void emac_clks_teardown(struct emac_adapter *adpt)
+{
+	unsigned int i;
+
+ for (i = 0; i < EMAC_CLK_CNT; i++)
+ clk_disable_unprepare(adpt->clk[i]);
+}
+
+/* Get the resources */
+static int emac_probe_resources(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ struct resource *res;
+ char maddr[ETH_ALEN];
+ int ret = 0;
+
+ /* get mac address */
+ if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
+ ether_addr_copy(netdev->dev_addr, maddr);
+ else
+ eth_hw_addr_random(netdev);
+
+ /* Core 0 interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "error: missing core0 irq resource (error=%i)\n", ret);
+ return ret;
+ }
+ adpt->irq.irq = ret;
+
+ /* base register address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adpt->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adpt->base))
+ return PTR_ERR(adpt->base);
+
+ /* CSR register address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ adpt->csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adpt->csr))
+ return PTR_ERR(adpt->csr);
+
+ netdev->base_addr = (unsigned long)adpt->base;
+
+ return 0;
+}
+
+static const struct of_device_id emac_dt_match[] = {
+ {
+ .compatible = "qcom,fsm9900-emac",
+ },
+ {}
+};
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id emac_acpi_match[] = {
+ {
+ .id = "QCOM8070",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
+#endif
+
+static int emac_probe(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct emac_adapter *adpt;
+ struct emac_phy *phy;
+ u16 devid, revid;
+ u32 reg;
+ int ret;
+
+ /* The EMAC itself is capable of 64-bit DMA, so try that first. */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ /* Some platforms may restrict the EMAC's address bus to less
+		 * than the size of DDR. In this case, we need to try a
+		 * smaller mask. We could try every possible smaller mask,
+		 * but that's overkill. Instead, just fall back to 32-bit,
+		 * which should always work.
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "could not set DMA mask\n");
+ return ret;
+ }
+ }
+
+ netdev = alloc_etherdev(sizeof(struct emac_adapter));
+ if (!netdev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adpt = netdev_priv(netdev);
+ adpt->netdev = netdev;
+ adpt->msg_enable = EMAC_MSG_DEFAULT;
+
+ phy = &adpt->phy;
+
+ mutex_init(&adpt->reset_lock);
+ spin_lock_init(&adpt->stats.lock);
+
+ adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;
+
+ ret = emac_probe_resources(pdev, adpt);
+ if (ret)
+ goto err_undo_netdev;
+
+ /* initialize clocks */
+ ret = emac_clks_phase1_init(pdev, adpt);
+ if (ret) {
+ dev_err(&pdev->dev, "could not initialize clocks\n");
+ goto err_undo_netdev;
+ }
+
+ netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
+ netdev->irq = adpt->irq.irq;
+
+ adpt->rrd_size = EMAC_RRD_SIZE;
+ adpt->tpd_size = EMAC_TPD_SIZE;
+ adpt->rfd_size = EMAC_RFD_SIZE;
+
+ netdev->netdev_ops = &emac_netdev_ops;
+
+ emac_init_adapter(adpt);
+
+ /* init external phy */
+ ret = emac_phy_config(pdev, adpt);
+ if (ret)
+ goto err_undo_clocks;
+
+ /* init internal sgmii phy */
+ ret = emac_sgmii_config(pdev, adpt);
+ if (ret)
+ goto err_undo_mdiobus;
+
+ /* enable clocks */
+ ret = emac_clks_phase2_init(pdev, adpt);
+ if (ret) {
+ dev_err(&pdev->dev, "could not initialize clocks\n");
+ goto err_undo_mdiobus;
+ }
+
+ emac_mac_reset(adpt);
+
+ /* set hw features */
+ netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features = netdev->features;
+
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ INIT_WORK(&adpt->work_thread, emac_work_thread);
+
+ /* Initialize queues */
+ emac_mac_rx_tx_ring_init_all(pdev, adpt);
+
+ netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
+ NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register net device\n");
+ goto err_undo_napi;
+ }
+
+ reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
+ devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
+ revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
+ reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);
+
+ netif_info(adpt, probe, netdev,
+ "hardware id %d.%d, hardware version %d.%d.%d\n",
+ devid, revid,
+ (reg & MAJOR_BMSK) >> MAJOR_SHFT,
+ (reg & MINOR_BMSK) >> MINOR_SHFT,
+ (reg & STEP_BMSK) >> STEP_SHFT);
+
+ return 0;
+
+err_undo_napi:
+ netif_napi_del(&adpt->rx_q.napi);
+err_undo_mdiobus:
+ mdiobus_unregister(adpt->mii_bus);
+err_undo_clocks:
+ emac_clks_teardown(adpt);
+err_undo_netdev:
+ free_netdev(netdev);
+
+ return ret;
+}
+
+static int emac_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ netif_napi_del(&adpt->rx_q.napi);
+
+ emac_clks_teardown(adpt);
+
+ mdiobus_unregister(adpt->mii_bus);
+ free_netdev(netdev);
+
+ if (adpt->phy.digital)
+ iounmap(adpt->phy.digital);
+ iounmap(adpt->phy.base);
+
+ return 0;
+}
+
+static struct platform_driver emac_platform_driver = {
+ .probe = emac_probe,
+ .remove = emac_remove,
+ .driver = {
+ .name = "qcom-emac",
+ .of_match_table = emac_dt_match,
+ .acpi_match_table = ACPI_PTR(emac_acpi_match),
+ },
+};
+
+module_platform_driver(emac_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-emac");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
new file mode 100644
index 000000000000..0c76e6cb8c9e
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -0,0 +1,335 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/irqreturn.h>
+#include <linux/netdevice.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include "emac-mac.h"
+#include "emac-phy.h"
+
+/* EMAC base register offsets */
+#define EMAC_DMA_MAS_CTRL 0x001400
+#define EMAC_IRQ_MOD_TIM_INIT 0x001408
+#define EMAC_BLK_IDLE_STS 0x00140c
+#define EMAC_PHY_LINK_DELAY 0x00141c
+#define EMAC_SYS_ALIV_CTRL 0x001434
+#define EMAC_MAC_IPGIFG_CTRL 0x001484
+#define EMAC_MAC_STA_ADDR0 0x001488
+#define EMAC_MAC_STA_ADDR1 0x00148c
+#define EMAC_HASH_TAB_REG0 0x001490
+#define EMAC_HASH_TAB_REG1 0x001494
+#define EMAC_MAC_HALF_DPLX_CTRL 0x001498
+#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c
+#define EMAC_INT_STATUS 0x001600
+#define EMAC_INT_MASK 0x001604
+#define EMAC_RXMAC_STATC_REG0 0x001700
+#define EMAC_RXMAC_STATC_REG22 0x001758
+#define EMAC_TXMAC_STATC_REG0 0x001760
+#define EMAC_TXMAC_STATC_REG24 0x0017c0
+#define EMAC_CORE_HW_VERSION 0x001974
+#define EMAC_IDT_TABLE0 0x001b00
+#define EMAC_RXMAC_STATC_REG23 0x001bc8
+#define EMAC_RXMAC_STATC_REG24 0x001bcc
+#define EMAC_TXMAC_STATC_REG25 0x001bd0
+#define EMAC_INT1_MASK 0x001bf0
+#define EMAC_INT1_STATUS 0x001bf4
+#define EMAC_INT2_MASK 0x001bf8
+#define EMAC_INT2_STATUS 0x001bfc
+#define EMAC_INT3_MASK 0x001c00
+#define EMAC_INT3_STATUS 0x001c04
+
+/* EMAC_DMA_MAS_CTRL */
+#define DEV_ID_NUM_BMSK 0x7f000000
+#define DEV_ID_NUM_SHFT 24
+#define DEV_REV_NUM_BMSK 0xff0000
+#define DEV_REV_NUM_SHFT 16
+#define INT_RD_CLR_EN 0x4000
+#define IRQ_MODERATOR2_EN 0x800
+#define IRQ_MODERATOR_EN 0x400
+#define LPW_CLK_SEL 0x80
+#define LPW_STATE 0x20
+#define LPW_MODE 0x10
+#define SOFT_RST 0x1
+
+/* EMAC_IRQ_MOD_TIM_INIT */
+#define IRQ_MODERATOR2_INIT_BMSK 0xffff0000
+#define IRQ_MODERATOR2_INIT_SHFT 16
+#define IRQ_MODERATOR_INIT_BMSK 0xffff
+#define IRQ_MODERATOR_INIT_SHFT 0
+
+/* EMAC_INT_STATUS */
+#define DIS_INT BIT(31)
+#define PTP_INT BIT(30)
+#define RFD4_UR_INT BIT(29)
+#define TX_PKT_INT3 BIT(26)
+#define TX_PKT_INT2 BIT(25)
+#define TX_PKT_INT1 BIT(24)
+#define RX_PKT_INT3 BIT(19)
+#define RX_PKT_INT2 BIT(18)
+#define RX_PKT_INT1 BIT(17)
+#define RX_PKT_INT0 BIT(16)
+#define TX_PKT_INT BIT(15)
+#define TXQ_TO_INT BIT(14)
+#define GPHY_WAKEUP_INT BIT(13)
+#define GPHY_LINK_DOWN_INT BIT(12)
+#define GPHY_LINK_UP_INT BIT(11)
+#define DMAW_TO_INT BIT(10)
+#define DMAR_TO_INT BIT(9)
+#define TXF_UR_INT BIT(8)
+#define RFD3_UR_INT BIT(7)
+#define RFD2_UR_INT BIT(6)
+#define RFD1_UR_INT BIT(5)
+#define RFD0_UR_INT BIT(4)
+#define RXF_OF_INT BIT(3)
+#define SW_MAN_INT BIT(2)
+
+/* EMAC_MAILBOX_6 */
+#define RFD2_PROC_IDX_BMSK 0xfff0000
+#define RFD2_PROC_IDX_SHFT 16
+#define RFD2_PROD_IDX_BMSK 0xfff
+#define RFD2_PROD_IDX_SHFT 0
+
+/* EMAC_CORE_HW_VERSION */
+#define MAJOR_BMSK 0xf0000000
+#define MAJOR_SHFT 28
+#define MINOR_BMSK 0xfff0000
+#define MINOR_SHFT 16
+#define STEP_BMSK 0xffff
+#define STEP_SHFT 0
+
+/* EMAC_EMAC_WRAPPER_CSR1 */
+#define TX_INDX_FIFO_SYNC_RST BIT(23)
+#define TX_TS_FIFO_SYNC_RST BIT(22)
+#define RX_TS_FIFO2_SYNC_RST BIT(21)
+#define RX_TS_FIFO1_SYNC_RST BIT(20)
+#define TX_TS_ENABLE BIT(16)
+#define DIS_1588_CLKS BIT(11)
+#define FREQ_MODE BIT(9)
+#define ENABLE_RRD_TIMESTAMP BIT(3)
+
+/* EMAC_EMAC_WRAPPER_CSR2 */
+#define HDRIVE_BMSK 0x3000
+#define HDRIVE_SHFT 12
+#define SLB_EN BIT(9)
+#define PLB_EN BIT(8)
+#define WOL_EN BIT(3)
+#define PHY_RESET BIT(0)
+
+#define EMAC_DEV_ID 0x0040
+
+/* SGMII v2 per lane registers */
+#define SGMII_LN_RSM_START 0x029C
+
+/* SGMII v2 PHY common registers */
+#define SGMII_PHY_CMN_CTRL 0x0408
+#define SGMII_PHY_CMN_RESET_CTRL 0x0410
+
+/* SGMII v2 PHY registers per lane */
+#define SGMII_PHY_LN_OFFSET 0x0400
+#define SGMII_PHY_LN_LANE_STATUS 0x00DC
+#define SGMII_PHY_LN_BIST_GEN0 0x008C
+#define SGMII_PHY_LN_BIST_GEN1 0x0090
+#define SGMII_PHY_LN_BIST_GEN2 0x0094
+#define SGMII_PHY_LN_BIST_GEN3 0x0098
+#define SGMII_PHY_LN_CDR_CTRL1 0x005C
+
+enum emac_clk_id {
+ EMAC_CLK_AXI,
+ EMAC_CLK_CFG_AHB,
+ EMAC_CLK_HIGH_SPEED,
+ EMAC_CLK_MDIO,
+ EMAC_CLK_TX,
+ EMAC_CLK_RX,
+ EMAC_CLK_SYS,
+ EMAC_CLK_CNT
+};
+
+#define EMAC_LINK_SPEED_UNKNOWN 0x0
+#define EMAC_LINK_SPEED_10_HALF BIT(0)
+#define EMAC_LINK_SPEED_10_FULL BIT(1)
+#define EMAC_LINK_SPEED_100_HALF BIT(2)
+#define EMAC_LINK_SPEED_100_FULL BIT(3)
+#define EMAC_LINK_SPEED_1GB_FULL BIT(5)
+
+#define EMAC_MAX_SETUP_LNK_CYCLE 100
+
+/* Wake On Lan */
+#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */
+#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */
+
+struct emac_stats {
+ /* rx */
+ u64 rx_ok; /* good packets */
+ u64 rx_bcast; /* good broadcast packets */
+ u64 rx_mcast; /* good multicast packets */
+ u64 rx_pause; /* pause packet */
+ u64 rx_ctrl; /* control packets other than pause frame. */
+ u64 rx_fcs_err; /* packets with bad FCS. */
+ u64 rx_len_err; /* packets with length mismatch */
+ u64 rx_byte_cnt; /* good bytes count (without FCS) */
+ u64 rx_runt; /* runt packets */
+ u64 rx_frag; /* fragment count */
+ u64 rx_sz_64; /* packets that are 64 bytes */
+ u64 rx_sz_65_127; /* packets that are 65-127 bytes */
+ u64 rx_sz_128_255; /* packets that are 128-255 bytes */
+ u64 rx_sz_256_511; /* packets that are 256-511 bytes */
+ u64 rx_sz_512_1023; /* packets that are 512-1023 bytes */
+ u64 rx_sz_1024_1518; /* packets that are 1024-1518 bytes */
+	u64 rx_sz_1519_max;     /* packets that are 1519-MTU bytes */
+ u64 rx_sz_ov; /* packets that are >MTU bytes (truncated) */
+ u64 rx_rxf_ov; /* packets dropped due to RX FIFO overflow */
+ u64 rx_align_err; /* alignment errors */
+ u64 rx_bcast_byte_cnt; /* broadcast packets byte count (without FCS) */
+ u64 rx_mcast_byte_cnt; /* multicast packets byte count (without FCS) */
+ u64 rx_err_addr; /* packets dropped due to address filtering */
+ u64 rx_crc_align; /* CRC align errors */
+ u64 rx_jabbers; /* jabbers */
+
+ /* tx */
+ u64 tx_ok; /* good packets */
+ u64 tx_bcast; /* good broadcast packets */
+ u64 tx_mcast; /* good multicast packets */
+ u64 tx_pause; /* pause packets */
+ u64 tx_exc_defer; /* packets with excessive deferral */
+ u64 tx_ctrl; /* control packets other than pause frame */
+ u64 tx_defer; /* packets that are deferred. */
+ u64 tx_byte_cnt; /* good bytes count (without FCS) */
+ u64 tx_sz_64; /* packets that are 64 bytes */
+ u64 tx_sz_65_127; /* packets that are 65-127 bytes */
+ u64 tx_sz_128_255; /* packets that are 128-255 bytes */
+ u64 tx_sz_256_511; /* packets that are 256-511 bytes */
+ u64 tx_sz_512_1023; /* packets that are 512-1023 bytes */
+ u64 tx_sz_1024_1518; /* packets that are 1024-1518 bytes */
+ u64 tx_sz_1519_max; /* packets that are 1519-MTU bytes */
+	u64 tx_1_col;           /* packets with a single prior collision */
+ u64 tx_2_col; /* packets with multiple prior collisions */
+ u64 tx_late_col; /* packets with late collisions */
+ u64 tx_abort_col; /* packets aborted due to excess collisions */
+ u64 tx_underrun; /* packets aborted due to FIFO underrun */
+ u64 tx_rd_eop; /* count of reads beyond EOP */
+ u64 tx_len_err; /* packets with length mismatch */
+ u64 tx_trunc; /* packets truncated due to size >MTU */
+ u64 tx_bcast_byte; /* broadcast packets byte count (without FCS) */
+ u64 tx_mcast_byte; /* multicast packets byte count (without FCS) */
+ u64 tx_col; /* collisions */
+
+ spinlock_t lock; /* prevent multiple simultaneous readers */
+};
+
+/* RSS hstype Definitions */
+#define EMAC_RSS_HSTYP_IPV4_EN 0x00000001
+#define EMAC_RSS_HSTYP_TCP4_EN 0x00000002
+#define EMAC_RSS_HSTYP_IPV6_EN 0x00000004
+#define EMAC_RSS_HSTYP_TCP6_EN 0x00000008
+#define EMAC_RSS_HSTYP_ALL_EN (\
+ EMAC_RSS_HSTYP_IPV4_EN |\
+ EMAC_RSS_HSTYP_TCP4_EN |\
+ EMAC_RSS_HSTYP_IPV6_EN |\
+ EMAC_RSS_HSTYP_TCP6_EN)
+
+#define EMAC_VLAN_TO_TAG(_vlan, _tag) \
+ (_tag = ((((_vlan) >> 8) & 0xFF) | (((_vlan) & 0xFF) << 8)))
+
+#define EMAC_TAG_TO_VLAN(_tag, _vlan) \
+ (_vlan = ((((_tag) >> 8) & 0xFF) | (((_tag) & 0xFF) << 8)))
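+
+/* These two helpers byte-swap a 16-bit value between host order and the
+ * on-wire tag order, e.g. a VLAN value of 0x0123 maps to a tag of
+ * 0x2301, and EMAC_TAG_TO_VLAN reverses the swap.
+ */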
+
+#define EMAC_DEF_RX_BUF_SIZE 1536
+#define EMAC_MAX_JUMBO_PKT_SIZE (9 * 1024)
+#define EMAC_MAX_TX_OFFLOAD_THRESH (9 * 1024)
+
+#define EMAC_MAX_ETH_FRAME_SIZE EMAC_MAX_JUMBO_PKT_SIZE
+#define EMAC_MIN_ETH_FRAME_SIZE 68
+
+#define EMAC_DEF_TX_QUEUES 1
+#define EMAC_DEF_RX_QUEUES 1
+
+#define EMAC_MIN_TX_DESCS 128
+#define EMAC_MIN_RX_DESCS 128
+
+#define EMAC_MAX_TX_DESCS 16383
+#define EMAC_MAX_RX_DESCS 2047
+
+#define EMAC_DEF_TX_DESCS 512
+#define EMAC_DEF_RX_DESCS 256
+
+#define EMAC_DEF_RX_IRQ_MOD 250
+#define EMAC_DEF_TX_IRQ_MOD 250
+
+#define EMAC_WATCHDOG_TIME (5 * HZ)
+
+/* by default check link every 4 seconds */
+#define EMAC_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* emac_irq per-device (per-adapter) irq properties.
+ * @irq: irq number.
+ * @mask: mask to use over the status register.
+ */
+struct emac_irq {
+ unsigned int irq;
+ u32 mask;
+};
+
+/* The device's main data structure */
+struct emac_adapter {
+ struct net_device *netdev;
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+
+ void __iomem *base;
+ void __iomem *csr;
+
+ struct emac_phy phy;
+ struct emac_stats stats;
+
+ struct emac_irq irq;
+ struct clk *clk[EMAC_CLK_CNT];
+
+ /* All Descriptor memory */
+ struct emac_ring_header ring_header;
+ struct emac_tx_queue tx_q;
+ struct emac_rx_queue rx_q;
+ unsigned int tx_desc_cnt;
+ unsigned int rx_desc_cnt;
+ unsigned int rrd_size; /* in quad words */
+ unsigned int rfd_size; /* in quad words */
+ unsigned int tpd_size; /* in quad words */
+
+ unsigned int rxbuf_size;
+
+ /* Ring parameter */
+ u8 tpd_burst;
+ u8 rfd_burst;
+ unsigned int dmaw_dly_cnt;
+ unsigned int dmar_dly_cnt;
+ enum emac_dma_req_block dmar_block;
+ enum emac_dma_req_block dmaw_block;
+ enum emac_dma_order dma_order;
+
+ u32 irq_mod;
+ u32 preamble;
+
+ struct work_struct work_thread;
+
+ u16 msg_enable;
+
+ struct mutex reset_lock;
+};
+
+int emac_reinit_locked(struct emac_adapter *adpt);
+void emac_reg_update32(void __iomem *addr, u32 mask, u32 val);
+irqreturn_t emac_isr(int irq, void *data);
+
+#endif /* _EMAC_H_ */
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index cb29ee24cf1b..5ef5d728c250 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1062,14 +1062,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "32-bit PCI DMA addresses"
- "not supported by the card\n");
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "32-bit PCI DMA addresses"
- "not supported by the card\n");
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 4f132cf177cd..85ec447c2d18 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -27,7 +27,7 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A779x.
+ R8A7740, R8A774x, R8A777x and R8A779x.
config RAVB
tristate "Renesas Ethernet AVB support"
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 4e5d5e953e15..f1109661a533 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1011,7 +1011,6 @@ struct ravb_private {
struct work_struct work;
/* MII transceiver section. */
struct mii_bus *mii_bus; /* MDIO bus control */
- struct phy_device *phydev; /* PHY device control */
int link;
phy_interface_t phy_interface;
int msg_enable;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 1e1cc0fad17f..630536bc72f9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -942,7 +942,7 @@ out:
static void ravb_adjust_link(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
bool new_state = false;
if (phydev->link) {
@@ -1032,48 +1032,47 @@ static int ravb_phy_init(struct net_device *ndev)
phy_attached_info(phydev);
- priv->phydev = phydev;
-
return 0;
}
/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
- struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_phy_init(ndev);
if (error)
return error;
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
return 0;
}
-static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int ravb_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct ravb_private *priv = netdev_priv(ndev);
int error = -ENODEV;
unsigned long flags;
- if (priv->phydev) {
+ if (ndev->phydev) {
spin_lock_irqsave(&priv->lock, flags);
- error = phy_ethtool_gset(priv->phydev, ecmd);
+ error = phy_ethtool_ksettings_get(ndev->phydev, cmd);
spin_unlock_irqrestore(&priv->lock, flags);
}
return error;
}
-static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int ravb_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
int error;
- if (!priv->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&priv->lock, flags);
@@ -1081,11 +1080,11 @@ static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
/* Disable TX and RX */
ravb_rcv_snd_disable(ndev);
- error = phy_ethtool_sset(priv->phydev, ecmd);
+ error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
if (error)
goto error_exit;
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
priv->duplex = 1;
else
priv->duplex = 0;
@@ -1110,9 +1109,9 @@ static int ravb_nway_reset(struct net_device *ndev)
int error = -ENODEV;
unsigned long flags;
- if (priv->phydev) {
+ if (ndev->phydev) {
spin_lock_irqsave(&priv->lock, flags);
- error = phy_start_aneg(priv->phydev);
+ error = phy_start_aneg(ndev->phydev);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1309,8 +1308,6 @@ static int ravb_get_ts_info(struct net_device *ndev,
}
static const struct ethtool_ops ravb_ethtool_ops = {
- .get_settings = ravb_get_settings,
- .set_settings = ravb_set_settings,
.nway_reset = ravb_nway_reset,
.get_msglevel = ravb_get_msglevel,
.set_msglevel = ravb_set_msglevel,
@@ -1321,6 +1318,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ringparam = ravb_get_ringparam,
.set_ringparam = ravb_set_ringparam,
.get_ts_info = ravb_get_ts_info,
+ .get_link_ksettings = ravb_get_link_ksettings,
+ .set_link_ksettings = ravb_set_link_ksettings,
};
static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
@@ -1661,10 +1660,9 @@ static int ravb_close(struct net_device *ndev)
}
/* PHY disconnect */
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
}
if (priv->chip_id != RCAR_GEN2) {
@@ -1753,8 +1751,7 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct ravb_private *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -1876,6 +1873,20 @@ static int ravb_set_gti(struct net_device *ndev)
return 0;
}
+static void ravb_set_config_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+ CCC_GAC | CCC_CSEL_HPB);
+ }
+}
+
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1978,14 +1989,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ravb_ethtool_ops;
/* Set AVB config mode */
- if (chip_id == RCAR_GEN2) {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
- CCC_GAC | CCC_CSEL_HPB);
- }
+ ravb_set_config_mode(ndev);
/* Set GTI value */
error = ravb_set_gti(ndev);
@@ -2096,8 +2100,55 @@ static int ravb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ravb_runtime_nop(struct device *dev)
+static int __maybe_unused ravb_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = ravb_close(ndev);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused ravb_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+	/* All registers have been reset to default values.
+	 * Restore all registers which were set up at probe time and
+	 * reopen the device if it was running before the system suspended.
+ */
+
+ /* Set AVB config mode */
+ ravb_set_config_mode(ndev);
+
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+
+ /* Restore descriptor base address table */
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+ if (netif_running(ndev)) {
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ return ret;
+ netif_device_attach(ndev);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
@@ -2110,20 +2161,16 @@ static int ravb_runtime_nop(struct device *dev)
}
static const struct dev_pm_ops ravb_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
-#define RAVB_PM_OPS (&ravb_dev_pm_ops)
-#else
-#define RAVB_PM_OPS NULL
-#endif
-
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
.remove = ravb_remove,
.driver = {
.name = "ravb",
- .pm = RAVB_PM_OPS,
+ .pm = &ravb_dev_pm_ops,
.of_match_table = ravb_match_table,
},
};
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 054e795df90f..05b0dc55de77 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1728,7 +1728,7 @@ out:
static void sh_eth_adjust_link(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- struct phy_device *phydev = mdp->phydev;
+ struct phy_device *phydev = ndev->phydev;
int new_state = 0;
if (phydev->link) {
@@ -1805,51 +1805,48 @@ static int sh_eth_phy_init(struct net_device *ndev)
phy_attached_info(phydev);
- mdp->phydev = phydev;
-
return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
ret = sh_eth_phy_init(ndev);
if (ret)
return ret;
- phy_start(mdp->phydev);
+ phy_start(ndev->phydev);
return 0;
}
-static int sh_eth_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+static int sh_eth_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
- if (!mdp->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_ethtool_gset(mdp->phydev, ecmd);
+ ret = phy_ethtool_ksettings_get(ndev->phydev, cmd);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
}
-static int sh_eth_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+static int sh_eth_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
- if (!mdp->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
@@ -1857,11 +1854,11 @@ static int sh_eth_set_settings(struct net_device *ndev,
/* disable tx and rx */
sh_eth_rcv_snd_disable(ndev);
- ret = phy_ethtool_sset(mdp->phydev, ecmd);
+ ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
if (ret)
goto error_exit;
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
mdp->duplex = 1;
else
mdp->duplex = 0;
@@ -2072,11 +2069,11 @@ static int sh_eth_nway_reset(struct net_device *ndev)
unsigned long flags;
int ret;
- if (!mdp->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_start_aneg(mdp->phydev);
+ ret = phy_start_aneg(ndev->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
@@ -2203,8 +2200,6 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
- .get_settings = sh_eth_get_settings,
- .set_settings = sh_eth_set_settings,
.get_regs_len = sh_eth_get_regs_len,
.get_regs = sh_eth_get_regs,
.nway_reset = sh_eth_nway_reset,
@@ -2216,6 +2211,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_sset_count = sh_eth_get_sset_count,
.get_ringparam = sh_eth_get_ringparam,
.set_ringparam = sh_eth_set_ringparam,
+ .get_link_ksettings = sh_eth_get_link_ksettings,
+ .set_link_ksettings = sh_eth_set_link_ksettings,
};
/* network device open function */
@@ -2413,10 +2410,9 @@ static int sh_eth_close(struct net_device *ndev)
sh_eth_dev_exit(ndev);
/* PHY Disconnect */
- if (mdp->phydev) {
- phy_stop(mdp->phydev);
- phy_disconnect(mdp->phydev);
- mdp->phydev = NULL;
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
}
free_irq(ndev->irq, ndev);
@@ -2434,8 +2430,7 @@ static int sh_eth_close(struct net_device *ndev)
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- struct phy_device *phydev = mdp->phydev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -2964,6 +2959,8 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7743", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7745", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index c62380e34a1d..d050f37f3e0f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -518,7 +518,6 @@ struct sh_eth_private {
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
- struct phy_device *phydev; /* PHY device control */
int link;
phy_interface_t phy_interface;
int msg_enable;
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 1ab995f7146b..2eb9b49569d5 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
@@ -52,6 +53,9 @@ struct rocker_port {
struct rocker_dma_ring_info rx_ring;
};
+struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker);
+
struct rocker_world_ops;
struct rocker {
@@ -66,6 +70,7 @@ struct rocker {
spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
+ struct notifier_block fib_nb;
struct rocker_world_ops *wops;
void *wpriv;
};
@@ -117,11 +122,6 @@ struct rocker_world_ops {
int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
struct switchdev_obj_port_vlan *vlan,
switchdev_obj_dump_cb_t *cb);
- int (*port_obj_fib4_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
- int (*port_obj_fib4_del)(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
@@ -141,6 +141,11 @@ struct rocker_world_ops {
int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port,
const unsigned char *addr,
__be16 vlan_id);
+ int (*fib4_add)(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info);
+ int (*fib4_del)(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info);
+ void (*fib4_abort)(struct rocker *rocker);
};
extern struct rocker_world_ops rocker_ofdpa_ops;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index f0b09b05ed3f..5424fb341613 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1625,29 +1625,6 @@ rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
}
static int
-rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fib4_add)
- return -EOPNOTSUPP;
- return wops->port_obj_fib4_add(rocker_port, fib4, trans);
-}
-
-static int
-rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fib4_del)
- return -EOPNOTSUPP;
- return wops->port_obj_fib4_del(rocker_port, fib4);
-}
-
-static int
rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
@@ -1733,6 +1710,34 @@ static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
+static int rocker_world_fib4_add(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops->fib4_add)
+ return 0;
+ return wops->fib4_add(rocker, fen_info);
+}
+
+static int rocker_world_fib4_del(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops->fib4_del)
+ return 0;
+ return wops->fib4_del(rocker, fen_info);
+}
+
+static void rocker_world_fib4_abort(struct rocker *rocker)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (wops->fib4_abort)
+ wops->fib4_abort(rocker);
+}
+
/*****************
* Net device ops
*****************/
@@ -2096,11 +2101,6 @@ static int rocker_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = rocker_world_port_obj_fib4_add(rocker_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_world_port_obj_fdb_add(rocker_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -2125,10 +2125,6 @@ static int rocker_port_obj_del(struct net_device *dev,
err = rocker_world_port_obj_vlan_del(rocker_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = rocker_world_port_obj_fib4_del(rocker_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_world_port_obj_fdb_del(rocker_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -2175,6 +2171,31 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_obj_dump = rocker_port_obj_dump,
};
+static int rocker_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = rocker_world_fib4_add(rocker, fen_info);
+		if (err)
+			rocker_world_fib4_abort(rocker);
+		break;
+ case FIB_EVENT_ENTRY_DEL:
+ rocker_world_fib4_del(rocker, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ rocker_world_fib4_abort(rocker);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
/********************
* ethtool interface
********************/
@@ -2412,7 +2433,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
skb->protocol = eth_type_trans(skb, rocker_port->dev);
if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
- skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+ skb->offload_fwd_mark = 1;
rocker_port->dev->stats.rx_packets++;
rocker_port->dev->stats.rx_bytes += skb->len;
@@ -2740,6 +2761,9 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_ports;
}
+ rocker->fib_nb.notifier_call = rocker_router_fib_event;
+ register_fib_notifier(&rocker->fib_nb);
+
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
(int)sizeof(rocker->hw.id), &rocker->hw.id);
@@ -2771,6 +2795,7 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ unregister_fib_notifier(&rocker->fib_nb);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
rocker_remove_ports(rocker);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -2799,6 +2824,37 @@ static bool rocker_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &rocker_port_netdev_ops;
}
+static bool rocker_port_dev_check_under(const struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+
+ if (!rocker_port_dev_check(dev))
+ return false;
+
+ rocker_port = netdev_priv(dev);
+ if (rocker_port->rocker != rocker)
+ return false;
+
+ return true;
+}
+
+struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (rocker_port_dev_check_under(dev, rocker))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+ if (rocker_port_dev_check_under(lower_dev, rocker))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
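/* Editor's sketch (not part of the patch): rocker_port_dev_lower_find()
 * first tests the device itself and only then walks the devices stacked
 * below it (bonds, bridges, ...), returning the first port owned by this
 * rocker instance. A standalone model of that search - the kernel walks
 * the whole lower-device tree; one level is enough to show the shape.
 */
#include <stddef.h>
#include <stdio.h>

struct dev {
	int owner;		/* which switch instance owns this port */
	struct dev *lower[4];	/* devices stacked below this one */
	size_t n_lower;
};

static struct dev *lower_find(struct dev *dev, int owner)
{
	size_t i;

	if (dev->owner == owner)
		return dev;
	for (i = 0; i < dev->n_lower; i++)
		if (dev->lower[i]->owner == owner)
			return dev->lower[i];
	return NULL;		/* no port of ours underneath */
}

int main(void)
{
	struct dev port = { .owner = 7 };
	struct dev bond = { .owner = -1, .lower = { &port }, .n_lower = 1 };

	printf("found under bond: %s\n", lower_find(&bond, 7) ? "yes" : "no");
	return 0;
}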
+
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 1ca796316173..431a60804272 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -99,6 +99,7 @@ struct ofdpa_flow_tbl_entry {
struct ofdpa_flow_tbl_key key;
size_t key_len;
u32 key_crc32; /* key */
+ struct fib_info *fi;
};
struct ofdpa_group_tbl_entry {
@@ -189,6 +190,7 @@ struct ofdpa {
spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
u32 neigh_tbl_next_index;
unsigned long ageing_time;
+ bool fib_aborted;
};
struct ofdpa_port {
@@ -1043,7 +1045,8 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
- u32 group_id, int flags)
+ u32 group_id, struct fib_info *fi,
+ int flags)
{
struct ofdpa_flow_tbl_entry *entry;
@@ -1060,6 +1063,7 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
entry->key.ucast_routing.group_id = group_id;
entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
ucast_routing.group_id);
+ entry->fi = fi;
return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
@@ -1425,7 +1429,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
- group_id, flags);
+ group_id, NULL, flags);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
@@ -2390,7 +2394,7 @@ found:
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
struct switchdev_trans *trans, __be32 dst,
- int dst_len, const struct fib_info *fi,
+ int dst_len, struct fib_info *fi,
u32 tb_id, int flags)
{
const struct fib_nh *nh;
@@ -2426,7 +2430,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
dst_mask, priority, goto_tbl,
- group_id, flags);
+ group_id, fi, flags);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
err, &dst);
@@ -2558,7 +2562,6 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
int err;
- switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false);
rocker_port_set_learning(rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
@@ -2719,28 +2722,6 @@ static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
return err;
}
-static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- return ofdpa_port_fib_ipv4(ofdpa_port, trans,
- htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id, 0);
-}
-
-static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4)
-{
- struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
- htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id,
- OFDPA_OP_FLAG_REMOVE);
-}
-
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
@@ -2817,7 +2798,6 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);
ofdpa_port->bridge_dev = bridge;
- switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true);
return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}
@@ -2836,8 +2816,6 @@ static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
ofdpa_port_internal_vlan_id_get(ofdpa_port,
ofdpa_port->dev->ifindex);
- switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev,
- false);
ofdpa_port->bridge_dev = NULL;
err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
@@ -2926,6 +2904,82 @@ static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
}
+static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+
+ rocker_port = rocker_port_dev_lower_find(dev, rocker);
+ return rocker_port ? rocker_port->wpriv : NULL;
+}
+
+static int ofdpa_fib4_add(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+ int err;
+
+ if (ofdpa->fib_aborted)
+ return 0;
+ ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ if (!ofdpa_port)
+ return 0;
+ err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ fen_info->dst_len, fen_info->fi,
+ fen_info->tb_id, 0);
+ if (err)
+ return err;
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
+}
+
+static int ofdpa_fib4_del(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+
+ if (ofdpa->fib_aborted)
+ return 0;
+ ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ if (!ofdpa_port)
+ return 0;
+ fib_info_offload_dec(fen_info->fi);
+ return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ fen_info->dst_len, fen_info->fi,
+ fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
+}
+
+static void ofdpa_fib4_abort(struct rocker *rocker)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+ struct ofdpa_flow_tbl_entry *flow_entry;
+ struct hlist_node *tmp;
+ unsigned long flags;
+ int bkt;
+
+ if (ofdpa->fib_aborted)
+ return;
+
+ spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
+ if (flow_entry->key.tbl_id !=
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
+ continue;
+ ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
+ rocker);
+ if (!ofdpa_port)
+ continue;
+ fib_info_offload_dec(flow_entry->fi);
+ ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
+ flow_entry);
+ }
+ spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
+ ofdpa->fib_aborted = true;
+}
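/* Editor's sketch (not part of the patch): ofdpa_fib4_abort() is a
 * one-way latch - it flushes every offloaded unicast route once, sets
 * fib_aborted, and from then on fib4_add()/fib4_del() become no-ops so
 * the kernel handles all routing in software. A standalone model of the
 * latch; names and the route counter are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool fib_aborted;
static int offloaded_routes;

static int fib4_add(void)
{
	if (fib_aborted)
		return 0;	/* software owns routing now */
	offloaded_routes++;
	return 0;
}

static void fib4_abort(void)
{
	if (fib_aborted)
		return;		/* idempotent: flush only once */
	offloaded_routes = 0;
	fib_aborted = true;
}

int main(void)
{
	fib4_add();
	fib4_add();
	fib4_abort();
	fib4_add();	/* no-op after the abort */
	printf("offloaded=%d aborted=%d\n", offloaded_routes, fib_aborted);
	return 0;
}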
+
struct rocker_world_ops rocker_ofdpa_ops = {
.kind = "ofdpa",
.priv_size = sizeof(struct ofdpa),
@@ -2945,8 +2999,6 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
- .port_obj_fib4_add = ofdpa_port_obj_fib4_add,
- .port_obj_fib4_del = ofdpa_port_obj_fib4_del,
.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
@@ -2955,4 +3007,7 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_neigh_update = ofdpa_port_neigh_update,
.port_neigh_destroy = ofdpa_port_neigh_destroy,
.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
+ .fib4_add = ofdpa_fib4_add,
+ .fib4_del = ofdpa_fib4_del,
+ .fib4_abort = ofdpa_fib4_abort,
};
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e00a669e9e09..00279da6a1e8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -177,7 +177,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
@@ -188,7 +188,7 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
- if (outlen < sizeof(outbuf)) {
+ if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
netif_err(efx, drv, efx->net_dev,
"unable to read datapath firmware capabilities\n");
return -EIO;
@@ -197,6 +197,12 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_FLAGS2);
+ else
+ nic_data->datapath_caps2 = 0;
+
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
nic_data->rx_dpcpu_fw_id =
@@ -227,6 +233,116 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
return rc > 0 ? rc : -ERANGE;
}
+static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int implemented;
+ unsigned int enabled;
+ int rc;
+
+ nic_data->workaround_35388 = false;
+ nic_data->workaround_61265 = false;
+
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+
+ if (rc == -ENOSYS) {
+ /* Firmware without GET_WORKAROUNDS - not a problem. */
+ rc = 0;
+ } else if (rc == 0) {
+ /* Bug61265 workaround is always enabled if implemented. */
+ if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
+ nic_data->workaround_61265 = true;
+
+ if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
+ nic_data->workaround_35388 = true;
+ } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
+ /* Workaround is implemented but not enabled.
+ * Try to enable it.
+ */
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG35388,
+ true, NULL);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ /* If we failed to set the workaround just carry on. */
+ rc = 0;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 61265 is %sabled\n",
+ nic_data->workaround_61265 ? "en" : "dis");
+
+ return rc;
+}
+
+static void efx_ef10_process_timer_config(struct efx_nic *efx,
+ const efx_dword_t *data)
+{
+ unsigned int max_count;
+
+ if (EFX_EF10_WORKAROUND_61265(efx)) {
+ efx->timer_quantum_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
+ efx->timer_max_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
+ } else if (EFX_EF10_WORKAROUND_35388(efx)) {
+ efx->timer_quantum_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
+ max_count = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
+ efx->timer_max_ns = max_count * efx->timer_quantum_ns;
+ } else {
+ efx->timer_quantum_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
+ max_count = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
+ efx->timer_max_ns = max_count * efx->timer_quantum_ns;
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "got timer properties from MC: quantum %u ns; max %u ns\n",
+ efx->timer_quantum_ns, efx->timer_max_ns);
+}
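/* Editor's sketch (not part of the patch): in the two legacy branches
 * above the firmware reports a per-tick quantum plus a maximum tick
 * count, and the driver derives the longest moderation period from
 * them. A worked standalone example with made-up values; the real
 * numbers come from MC_CMD_GET_EVQ_TMR_PROPERTIES.
 */
#include <stdio.h>

int main(void)
{
	unsigned int quantum_ns = 6144;	/* hypothetical ns per timer tick */
	unsigned int max_count = 0xfff;	/* hypothetical 12-bit tick limit */
	unsigned int timer_max_ns = max_count * quantum_ns;

	printf("quantum %u ns, max period %u ns (~%u us)\n",
	       quantum_ns, timer_max_ns, timer_max_ns / 1000);
	return 0;
}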
+
+static int efx_ef10_get_timer_config(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
+ int rc;
+
+ rc = efx_ef10_get_timer_workarounds(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+
+ if (rc == 0) {
+ efx_ef10_process_timer_config(efx, outbuf);
+ } else if (rc == -ENOSYS || rc == -EPERM) {
+ /* Not available - fall back to Huntington defaults. */
+ unsigned int quantum;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ return rc;
+
+ quantum = 1536000 / rc; /* 1536 cycles */
+ efx->timer_quantum_ns = quantum;
+ efx->timer_max_ns = efx->type->timer_period_max * quantum;
+ rc = 0;
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
+ MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
+ NULL, 0, rc);
+ }
+
+ return rc;
+}
+
static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
@@ -527,32 +643,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail5;
- rc = efx_ef10_get_sysclk_freq(efx);
+ rc = efx_ef10_get_timer_config(efx);
if (rc < 0)
goto fail5;
- efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
-
- /* Check whether firmware supports bug 35388 workaround.
- * First try to enable it, then if we get EPERM, just
- * ask if it's already enabled
- */
- rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
- if (rc == 0) {
- nic_data->workaround_35388 = true;
- } else if (rc == -EPERM) {
- unsigned int enabled;
-
- rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
- if (rc)
- goto fail3;
- nic_data->workaround_35388 = enabled &
- MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
- } else if (rc != -ENOSYS && rc != -ENOENT) {
- goto fail5;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "workaround for bug 35388 is %sabled\n",
- nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
if (rc && rc != -EPERM)
@@ -1440,9 +1533,10 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
-/* These statistics are only provided by the 10G MAC. For a 10G/40G
- * switchable port we do not expose these because they might not
- * include all the packets they should.
+/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
+ * For a 10G/40G switchable port we do not expose these because they might
+ * not include all the packets they should.
+ * On 8000 series NICs these statistics are always provided.
*/
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
(1ULL << EF10_STAT_port_tx_lt64) | \
@@ -1488,10 +1582,15 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
return 0;
- if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
- else
+ /* 8000 series have everything even at 40G */
+ if (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
+ raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+ } else {
raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+ }
if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
@@ -1617,7 +1716,6 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
efx_ef10_get_stat_mask(efx, mask);
dma_stats = efx->stats_buffer.addr;
- nic_data = efx->nic_data;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
@@ -1744,27 +1842,43 @@ static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- unsigned int mode, value;
+ unsigned int mode, usecs;
efx_dword_t timer_cmd;
- if (channel->irq_moderation) {
+ if (channel->irq_moderation_us) {
mode = 3;
- value = channel->irq_moderation - 1;
+ usecs = channel->irq_moderation_us;
} else {
mode = 0;
- value = 0;
+ usecs = 0;
}
- if (EFX_EF10_WORKAROUND_35388(efx)) {
+ if (EFX_EF10_WORKAROUND_61265(efx)) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
+ unsigned int ns = usecs * 1000;
+
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
+ channel->channel);
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+ efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
+ inbuf, sizeof(inbuf), 0, NULL, 0);
+ } else if (EFX_EF10_WORKAROUND_35388(efx)) {
+ unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
+
EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
EFE_DD_EVQ_IND_TIMER_FLAGS,
ERF_DD_EVQ_IND_TIMER_MODE, mode,
- ERF_DD_EVQ_IND_TIMER_VAL, value);
+ ERF_DD_EVQ_IND_TIMER_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
channel->channel);
} else {
+ unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
+
EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, value);
+ ERF_DZ_TC_TIMER_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
channel->channel);
}
@@ -1935,14 +2049,18 @@ static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+static int efx_ef10_irq_test_generate(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+ if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
+ NULL) == 0)
+ return -ENOTSUPP;
+
BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
- (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
inbuf, sizeof(inbuf), NULL, 0, NULL);
}
@@ -2536,13 +2654,12 @@ fail:
static int efx_ef10_ev_init(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
- EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- bool supports_rx_merge;
size_t inlen, outlen;
unsigned int enabled, implemented;
dma_addr_t dma_addr;
@@ -2550,9 +2667,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
int i;
nic_data = efx->nic_data;
- supports_rx_merge =
- !!(nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
@@ -2561,11 +2675,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
/* INIT_EVQ expects index in vector table, not absolute */
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
- MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
- INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_IN_FLAG_RX_MERGE, 1,
- INIT_EVQ_IN_FLAG_TX_MERGE, 1,
- INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
@@ -2574,6 +2683,27 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+ if (nic_data->datapath_caps2 &
+ 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
+ /* Use the new generic approach to specifying event queue
+ * configuration, requesting lower latency or higher throughput.
+ * The options that actually get used appear in the output.
+ */
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_V2_IN_FLAG_TYPE,
+ MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
+ } else {
+ bool cut_thru = !(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
+ }
+
dma_addr = channel->eventq.buf.dma_addr;
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
@@ -2584,6 +2714,13 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
+
+ if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %d using event queue flags %08x\n",
+ channel->channel,
+ MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
/* IRQ return is ignored */
if (channel->channel || rc)
return rc;
@@ -2591,8 +2728,8 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
/* Successfully created event queue on channel 0 */
rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
if (rc == -ENOSYS) {
- /* GET_WORKAROUNDS was implemented before the bug26807
- * workaround, thus the latter must be unavailable in this fw
+ /* GET_WORKAROUNDS was implemented before this workaround,
+ * thus it must be unavailable in this firmware.
*/
nic_data->workaround_26807 = false;
rc = 0;
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 14b821b1c880..3cf3557106c2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -281,6 +281,27 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
* NAPI guarantees serialisation of polls of the same device, which
* provides the guarantee required by efx_process_channel().
*/
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
@@ -301,22 +322,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
- if (unlikely(channel->irq_mod_score <
- irq_adapt_low_thresh)) {
- if (channel->irq_moderation > 1) {
- channel->irq_moderation -= 1;
- efx->type->push_irq_moderation(channel);
- }
- } else if (unlikely(channel->irq_mod_score >
- irq_adapt_high_thresh)) {
- if (channel->irq_moderation <
- efx->irq_rx_moderation) {
- channel->irq_moderation += 1;
- efx->type->push_irq_moderation(channel);
- }
- }
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
+ efx_update_irq_mod(efx, channel);
}
efx_filter_rfs_expire(channel);
@@ -1703,6 +1709,7 @@ static int efx_probe_nic(struct efx_nic *efx)
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
/* Initialise the interrupt moderation settings */
+ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
true);
@@ -1949,14 +1956,21 @@ static void efx_remove_all(struct efx_nic *efx)
* Interrupt moderation
*
**************************************************************************/
-
-static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
if (usecs == 0)
return 0;
- if (usecs * 1000 < quantum_ns)
+ if (usecs * 1000 < efx->timer_quantum_ns)
return 1; /* never round down to 0 */
- return usecs * 1000 / quantum_ns;
+ return usecs * 1000 / efx->timer_quantum_ns;
+}
+
+unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
+{
+ /* We must round up when converting ticks to microseconds
+ * because we round down when converting the other way.
+ */
+ return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
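/* Editor's sketch (not part of the patch): efx_usecs_to_ticks() rounds
 * down (but never to zero) while efx_ticks_to_usecs() rounds up, so a
 * value that makes a round trip can only grow - the driver never
 * silently shortens a requested moderation period. Standalone model
 * using Falcon's 4968 ns quantum from a later hunk.
 */
#include <stdio.h>

#define QUANTUM_NS 4968U	/* Falcon's quantum (621 cycles) */

static unsigned int usecs_to_ticks(unsigned int usecs)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < QUANTUM_NS)
		return 1;	/* never round down to 0 */
	return usecs * 1000 / QUANTUM_NS;
}

static unsigned int ticks_to_usecs(unsigned int ticks)
{
	/* round up because the other direction rounds down */
	return (ticks * QUANTUM_NS + 999) / 1000;
}

int main(void)
{
	unsigned int usecs = 20;
	unsigned int ticks = usecs_to_ticks(usecs);	/* 20000/4968 = 4 */

	printf("%u us -> %u ticks -> %u us\n",
	       usecs, ticks, ticks_to_usecs(ticks));	/* back to 20 us */
	return 0;
}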
/* Set interrupt moderation parameters */
@@ -1965,21 +1979,16 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx)
{
struct efx_channel *channel;
- unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
- efx->timer_quantum_ns,
- 1000);
- unsigned int tx_ticks;
- unsigned int rx_ticks;
+ unsigned int timer_max_us;
EFX_ASSERT_RESET_SERIALISED(efx);
- if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
- return -EINVAL;
+ timer_max_us = efx->timer_max_ns / 1000;
- tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
- rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
+ if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
+ return -EINVAL;
- if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
!rx_may_override_tx) {
netif_err(efx, drv, efx->net_dev, "Channels are shared. "
"RX and TX IRQ moderation must be equal\n");
@@ -1987,12 +1996,12 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
}
efx->irq_rx_adaptive = rx_adaptive;
- efx->irq_rx_moderation = rx_ticks;
+ efx->irq_rx_moderation_us = rx_usecs;
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel))
- channel->irq_moderation = rx_ticks;
+ channel->irq_moderation_us = rx_usecs;
else if (efx_channel_has_tx_queues(channel))
- channel->irq_moderation = tx_ticks;
+ channel->irq_moderation_us = tx_usecs;
}
return 0;
@@ -2001,26 +2010,21 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive)
{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
-
*rx_adaptive = efx->irq_rx_adaptive;
- *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
- efx->timer_quantum_ns,
- 1000);
+ *rx_usecs = efx->irq_rx_moderation_us;
/* If channels are shared between RX and TX, so is IRQ
* moderation. Otherwise, IRQ moderation is the same for all
* TX channels and is not adaptive.
*/
- if (efx->tx_channel_offset == 0)
+ if (efx->tx_channel_offset == 0) {
*tx_usecs = *rx_usecs;
- else
- *tx_usecs = DIV_ROUND_UP(
- efx->channel[efx->tx_channel_offset]->irq_moderation *
- efx->timer_quantum_ns,
- 1000);
+ } else {
+ struct efx_channel *tx_channel;
+
+ tx_channel = efx->channel[efx->tx_channel_offset];
+ *tx_usecs = tx_channel->irq_moderation_us;
+ }
}
/**************************************************************************
@@ -2259,8 +2263,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
rc = efx_check_disabled(efx);
if (rc)
return rc;
- if (new_mtu > EFX_MAX_MTU)
+ if (new_mtu > EFX_MAX_MTU) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big (max: %d)\n",
+ new_mtu, EFX_MAX_MTU);
return -EINVAL;
+ }
+ if (new_mtu < EFX_MIN_MTU) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too small (min: %d)\n",
+ new_mtu, EFX_MIN_MTU);
+ return -EINVAL;
+ }
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index c3ae739e9c7a..342ae16e1f2d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -204,6 +204,8 @@ int efx_try_recovery(struct efx_nic *efx);
/* Global */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
+unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index d790cb8d9db3..1a7092602aec 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -378,12 +378,15 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
/* Set timer register */
- if (channel->irq_moderation) {
+ if (channel->irq_moderation_us) {
+ unsigned int ticks;
+
+ ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_AB_TC_TIMER_MODE,
FFE_BB_TIMER_MODE_INT_HLDOFF,
FRF_AB_TC_TIMER_VAL,
- channel->irq_moderation - 1);
+ ticks - 1);
} else {
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_AB_TC_TIMER_MODE,
@@ -2373,6 +2376,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
EFX_MAX_CHANNELS);
efx->max_tx_channels = efx->max_channels;
efx->timer_quantum_ns = 4968; /* 621 cycles */
+ efx->timer_max_ns = efx->type->timer_period_max *
+ efx->timer_quantum_ns;
/* Initialise I2C adapter */
board = falcon_board(efx);
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 1736f4b806af..f6883b2b5da3 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -64,7 +64,7 @@
#define LM87_ALARM_TEMP_INT 0x10
#define LM87_ALARM_TEMP_EXT1 0x20
-#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM87)
static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
@@ -455,7 +455,7 @@ static int sfe4001_init(struct efx_nic *efx)
struct falcon_board *board = falcon_board(efx);
int rc;
-#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4c83739d158f..4762ec444cb8 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1477,9 +1477,10 @@ void efx_farch_irq_disable_master(struct efx_nic *efx)
* Interrupt must already have been enabled, otherwise nasty things
* may happen.
*/
-void efx_farch_irq_test_generate(struct efx_nic *efx)
+int efx_farch_irq_test_generate(struct efx_nic *efx)
{
efx_farch_interrupts(efx, true, true);
+ return 0;
}
/* Process a fatal interrupt
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d28e7dd8fa3c..241520943ada 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -548,7 +548,10 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
err_len, rc);
}
- async->complete(efx, async->cookie, rc, outbuf, data_len);
+
+ if (async->complete)
+ async->complete(efx, async->cookie, rc, outbuf,
+ min(async->outlen, data_len));
kfree(async);
efx_mcdi_release(mcdi);
@@ -1153,7 +1156,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
* acquired locks in the wrong order.
*/
list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
- async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ if (async->complete)
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
list_del(&async->list);
kfree(async);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c9a5b003caaf..ccceafc15896 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2645,16 +2645,20 @@
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
-/* enum: RX DPCPU bus. */
+/* enum: RX0 DPCPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
/* enum: TX0 DPCPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
/* enum: TX1 DPCPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
-/* enum: RX DICPU bus. */
+/* enum: RX0 DICPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
/* enum: TX DICPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
/* Actual value read from RAM / register */
@@ -3612,6 +3616,8 @@
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
@@ -4389,6 +4395,8 @@
* the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
*/
#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
* the workaround
*/
@@ -4413,7 +4421,6 @@
* (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
* output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
* returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
- * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -5479,6 +5486,8 @@
#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
#define LICENSED_V3_FEATURES_MASK_LBN 0
#define LICENSED_V3_FEATURES_MASK_WIDTH 64
@@ -5634,6 +5643,109 @@
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+/* Initialisation flags (see the FLAG_* fields below) */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count only RX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count only TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of a 4k-aligned, 4k host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
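/* Editor's sketch (not part of the patch): every MCDI flag above is
 * described by an LBN (lowest bit number) and a WIDTH, and the driver's
 * MCDI accessor macros reduce to a plain shift-and-mask on the 32-bit
 * word. A standalone model of reading two flags from a hypothetical
 * INIT_EVQ_V2 response word.
 */
#include <stdio.h>

#define FLAG_CUT_THRU_LBN	0
#define FLAG_CUT_THRU_WIDTH	1
#define FLAG_RX_MERGE_LBN	1
#define FLAG_RX_MERGE_WIDTH	1

static unsigned int get_field(unsigned int word,
			      unsigned int lbn, unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1);
}

int main(void)
{
	unsigned int flags = 0x6;	/* hypothetical response word */

	printf("cut_thru=%u rx_merge=%u\n",
	       get_field(flags, FLAG_CUT_THRU_LBN, FLAG_CUT_THRU_WIDTH),
	       get_field(flags, FLAG_RX_MERGE_LBN, FLAG_RX_MERGE_WIDTH));
	return 0;
}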
/* QUEUE_CRC_MODE structuredef */
#define QUEUE_CRC_MODE_LEN 1
#define QUEUE_CRC_MODE_MODE_LBN 0
@@ -5697,8 +5809,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
-#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
-#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -7854,6 +7966,20 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -7910,6 +8036,288 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -9026,7 +9434,7 @@
*/
#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
@@ -10125,7 +10533,9 @@
* that this operation returns a zero-length response
*/
#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
-/* enum: report counts of installed licenses */
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
/* MC_CMD_LICENSING_V3_OUT msgresponse */
@@ -10763,6 +11173,8 @@
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
/***********************************/
@@ -11280,22 +11692,110 @@
#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 4
+#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
/* The VLAN priority associated with the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+/* Requested value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+/* Requested value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+/* Actual value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+
+
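A hedged sketch of driving the command above with the existing sfc MCDI
helpers (MCDI_DECLARE_BUF/MCDI_SET_DWORD/efx_mcdi_rpc); the function name
and error handling are illustrative, not the patch's actual code:

/* Sketch only: request EVQ timer values and read back what the hardware
 * actually applied after rounding and truncation.
 */
static int example_set_evq_tmr(struct efx_nic *efx, unsigned int instance,
			       unsigned int load_ns, unsigned int reload_ns)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_EVQ_TMR_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, load_ns);
	MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, reload_ns);
	MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE,
		       MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_EVQ_TMR, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SET_EVQ_TMR_OUT_LEN)
		return -EIO;

	/* The ACT_NS values are what the hardware will actually use. */
	pr_debug("EVQ timer: load %u ns, reload %u ns\n",
		 MCDI_DWORD(outbuf, SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS),
		 MCDI_DWORD(outbuf, SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS));
	return 0;
}
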
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts that
+ * are not a multiple of this step size will be rounded in an
+ * implementation-defined manner.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * that are not a multiple of this step size will be rounded in an
+ * implementation-defined manner. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#endif /* MCDI_PCOL_H */
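
The MCDI_TMR_STEP_NS/MCDI_TMR_MAX_NS comments above imply a
round-up-then-clamp rule for requested durations; a standalone sketch with
made-up property values:

#include <stdio.h>

/* Round up to a multiple of step, then clamp to max. */
static unsigned int tmr_round(unsigned int req_ns, unsigned int step_ns,
			      unsigned int max_ns)
{
	unsigned int val = (req_ns + step_ns - 1) / step_ns * step_ns;

	return val > max_ns ? max_ns : val;
}

int main(void)
{
	/* Illustrative properties, not real hardware values. */
	unsigned int step = 61, max = 250000;

	printf("req 100 ns -> %u ns\n", tmr_round(100, step, max)); /* 122 */
	printf("req 300000 ns -> %u ns\n", tmr_round(300000, step, max)); /* 250000 */
	return 0;
}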
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ff062a36ea8..99d8c82124bb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -76,6 +76,9 @@
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
+/* Minimum MTU, from RFC791 (IP) */
+#define EFX_MIN_MTU 68
+
/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
* and should be a multiple of the cache line size.
*/
@@ -392,7 +395,7 @@ enum efx_sync_events_state {
* @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
- * @irq_moderation: IRQ moderation value (in hardware ticks)
+ * @irq_moderation_us: IRQ moderation value (in microseconds)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
* @state: state for NAPI vs busy polling
@@ -433,7 +436,7 @@ struct efx_channel {
bool eventq_init;
bool enabled;
int irq;
- unsigned int irq_moderation;
+ unsigned int irq_moderation_us;
struct net_device *napi_dev;
struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -810,8 +813,10 @@ struct vfdi_status;
* @membase: Memory BAR value
* @interrupt_mode: Interrupt mode
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
- * @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
+ * @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
* @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
* @reset_pending: Bitmask for pending resets
@@ -940,8 +945,10 @@ struct efx_nic {
enum efx_int_mode interrupt_mode;
unsigned int timer_quantum_ns;
+ unsigned int timer_max_ns;
bool irq_rx_adaptive;
- unsigned int irq_rx_moderation;
+ unsigned int irq_mod_step_us;
+ unsigned int irq_rx_moderation_us;
u32 msg_enable;
enum nic_state state;
@@ -1271,7 +1278,7 @@ struct efx_nic_type {
int (*mcdi_poll_reboot)(struct efx_nic *efx);
void (*mcdi_reboot_detected)(struct efx_nic *efx);
void (*irq_enable_master)(struct efx_nic *efx);
- void (*irq_test_generate)(struct efx_nic *efx);
+ int (*irq_test_generate)(struct efx_nic *efx);
void (*irq_disable_non_ev)(struct efx_nic *efx);
irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 89b83e59e1dc..aa1945a858d5 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -66,11 +66,11 @@ void efx_nic_event_test_start(struct efx_channel *channel)
channel->efx->type->ev_test_generate(channel);
}
-void efx_nic_irq_test_start(struct efx_nic *efx)
+int efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
- efx->type->irq_test_generate(efx);
+ return efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 96944c3c9d14..73bee7ea332a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -507,10 +507,13 @@ enum {
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
+ * @workaround_61265: Flag: firmware supports workaround for bug 61265
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
+ * @datapath_caps2: Further capabilities of datapath firmware (FLAGS2 field of
+ * %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
* @vport_id: The function's vport ID, only relevant for PFs
@@ -540,8 +543,10 @@ struct efx_ef10_nic_data {
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
+ bool workaround_61265;
bool must_check_datapath_caps;
u32 datapath_caps;
+ u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
unsigned int vport_id;
@@ -741,12 +746,12 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
/* Interrupts */
int efx_nic_init_interrupt(struct efx_nic *efx);
-void efx_nic_irq_test_start(struct efx_nic *efx);
+int efx_nic_irq_test_start(struct efx_nic *efx);
void efx_nic_fini_interrupt(struct efx_nic *efx);
/* Falcon/Siena interrupts */
void efx_farch_irq_enable_master(struct efx_nic *efx);
-void efx_farch_irq_test_generate(struct efx_nic *efx);
+int efx_farch_irq_test_generate(struct efx_nic *efx);
void efx_farch_irq_disable_master(struct efx_nic *efx);
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c771e0af4e06..77a5364f7a10 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1269,13 +1269,13 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
if (IS_ERR(ptp->phc_clock)) {
rc = PTR_ERR(ptp->phc_clock);
goto fail3;
- }
-
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ } else if (ptp->phc_clock) {
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
}
ptp->nic_ts_enabled = false;
@@ -1306,7 +1306,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- channel->irq_moderation = 0;
+ channel->irq_moderation_us = 0;
channel->rx_queue.core_index = 0;
return efx_ptp_probe(efx, channel);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 9d78830da609..cd38b44ae23a 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -135,11 +135,19 @@ static int efx_test_interrupts(struct efx_nic *efx,
{
unsigned long timeout, wait;
int cpu;
+ int rc;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
- efx_nic_irq_test_start(efx);
+ rc = efx_nic_irq_test_start(efx);
+ if (rc == -ENOTSUPP) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "direct interrupt testing not supported\n");
+ tests->interrupt = 0;
+ return 0;
+ }
+
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 009dbe88f3be..32a427253a03 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -28,7 +28,7 @@ struct efx_loopback_self_tests {
/* Efx self test results
* For fields which are not counters, 1 indicates success and -1
- * indicates failure.
+ * indicates failure; 0 indicates the test could not be run.
*/
struct efx_self_tests {
/* online tests */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 2219b5424d2b..04ed1b4c7cd9 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -34,19 +34,24 @@ static void siena_init_wol(struct efx_nic *efx);
static void siena_push_irq_moderation(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
efx_dword_t timer_cmd;
- if (channel->irq_moderation)
+ if (channel->irq_moderation_us) {
+ unsigned int ticks;
+
+ ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF,
FRF_CZ_TC_TIMER_VAL,
- channel->irq_moderation - 1);
- else
+ ticks - 1);
+ } else {
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_DIS,
FRF_CZ_TC_TIMER_VAL, 0);
+ }
efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
channel->channel);
}
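
The driver now stores moderation in microseconds and converts to hardware
ticks per NIC; a standalone sketch of the conversion assumed by
efx_usecs_to_ticks(), using the Siena quantum values quoted in the next
hunk (the truncating division is an assumption):

#include <stdio.h>

static unsigned int usecs_to_ticks(unsigned int usecs,
				   unsigned int quantum_ns)
{
	return usecs * 1000 / quantum_ns;
}

int main(void)
{
	/* Siena quantum is 3072 ns (turbo) or 6144 ns, per the next hunk. */
	printf("30 us -> %u ticks @ 6144 ns\n", usecs_to_ticks(30, 6144)); /* 4 */
	printf("30 us -> %u ticks @ 3072 ns\n", usecs_to_ticks(30, 3072)); /* 9 */
	return 0;
}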
@@ -222,6 +227,9 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
efx->timer_quantum_ns =
(caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
3072 : 6144; /* 768 cycles */
+ efx->timer_max_ns = efx->type->timer_period_max *
+ efx->timer_quantum_ns;
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 816c44689e67..9abcf4aded30 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -22,7 +22,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -31,6 +31,9 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
} else {
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
index 400df526586d..ba1762e7f216 100644
--- a/drivers/net/ethernet/sfc/sriov.h
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -16,7 +16,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk);
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 2310b75d4ec2..351cd14cb9f9 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -50,4 +50,8 @@
#define EFX_WORKAROUND_35388(efx) \
(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+/* Moderation timer access must go through MCDI */
+#define EFX_EF10_WORKAROUND_61265(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
+
#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 95001ee408ab..6f85276376e8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1426,7 +1426,7 @@ static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
rx_flags |= RxATX;
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Can accept Jumbo packet */
rx_flags |= RxAJAB;
#endif
@@ -1750,7 +1750,7 @@ static int sis900_rx(struct net_device *net_dev)
data_size = rx_status & DSIZE;
rx_size = data_size - CRC_SIZE;
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* ``TOOLONG'' flag means jumbo packet received. */
if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
rx_status &= (~ ((unsigned int)TOOLONG));
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 7d430d322931..f0da3dc52c01 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -310,7 +310,7 @@ enum sis630_revision_id {
#define CRC_SIZE 4
#define MAC_HEADER_SIZE 14
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define MAX_FRAME_SIZE (1518 + 4)
#else
#define MAX_FRAME_SIZE 1518
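
The defined(CONFIG_X) || defined(CONFIG_X_MODULE) tests here and below are
collapsed into IS_ENABLED(CONFIG_X); a standalone approximation of the
kconfig.h preprocessor trick behind it (macro bodies paraphrased from
include/linux/kconfig.h):

#include <stdio.h>

/* CONFIG_FOO is defined to 1 when built in, CONFIG_FOO_MODULE when modular. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) \
	(__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_VLAN_8021Q_MODULE 1	/* pretend VLAN support is modular */

int main(void)
{
	/* prints 1: a modular option counts as enabled too */
	printf("IS_ENABLED(CONFIG_VLAN_8021Q) = %d\n",
	       IS_ENABLED(CONFIG_VLAN_8021Q));
	return 0;
}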
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 503a3b6dce91..73212590d04a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2323,6 +2323,9 @@ static int smc_drv_probe(struct platform_device *pdev)
} else {
lp->cfg.flags |= SMC91X_USE_16BIT;
}
+ if (!device_property_read_u32(&pdev->dev, "reg-shift",
+ &val))
+ lp->io_shift = val;
}
#endif
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4f8910b7db2e..e9b8579e6241 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/gpio/consumer.h>
#include "smsc911x.h"
@@ -147,6 +148,9 @@ struct smsc911x_data {
/* regulators */
struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
+ /* Reset GPIO */
+ struct gpio_desc *reset_gpiod;
+
/* clock */
struct clk *clk;
};
@@ -438,6 +442,11 @@ static int smsc911x_request_resources(struct platform_device *pdev)
netdev_err(ndev, "couldn't get regulators %d\n",
ret);
+ /* Request optional RESET GPIO */
+ pdata->reset_gpiod = devm_gpiod_get_optional(&pdev->dev,
+ "reset",
+ GPIOD_OUT_LOW);
+
/* Request clock */
pdata->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk))
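
devm_gpiod_get_optional() returns NULL rather than an error when no "reset"
GPIO is described, and the gpiod_* setters treat a NULL descriptor as a
no-op, so no presence check is needed. A hedged sketch of driving such a
descriptor (assumes <linux/gpio/consumer.h> and <linux/delay.h>; the delays
are illustrative, not from the smsc911x datasheet):

/* Sketch: pulse an optional reset line obtained with
 * devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW).
 * Every call below is a safe no-op when reset_gpiod is NULL.
 */
static void example_hw_reset(struct gpio_desc *reset_gpiod)
{
	gpiod_set_value_cansleep(reset_gpiod, 1);	/* assert reset */
	usleep_range(1000, 2000);			/* illustrative delay */
	gpiod_set_value_cansleep(reset_gpiod, 0);	/* release reset */
	usleep_range(1000, 2000);
}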
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 8f06a6621ab1..c732b8ce2528 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -104,6 +104,18 @@ config DWMAC_STI
device driver. This driver is used for the STi series
SoCs GMAC ethernet controller.
+config DWMAC_STM32
+ tristate "STM32 DWMAC support"
+ default ARCH_STM32
+ depends on OF && HAS_IOMEM
+ select MFD_SYSCON
+ ---help---
+ Support for the ethernet controller on STM32 SoCs.
+
+ This selects STM32 SoC glue layer support for the stmmac
+ device driver. This driver is used for the STM32 series
+ SoCs GMAC ethernet controller.
+
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 44b630cd1755..f0c9396fa28e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2533b91f1421..d3292c4a6eda 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -30,7 +30,7 @@
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/module.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 92105916ef40..3740a4417fa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
#include "stmmac_platform.h"
@@ -301,6 +302,118 @@ static const struct rk_gmac_ops rk3288_ops = {
.set_rmii_speed = rk3288_set_rmii_speed,
};
+#define RK3366_GRF_SOC_CON6 0x0418
+#define RK3366_GRF_SOC_CON7 0x041c
+
+/* RK3366_GRF_SOC_CON6 */
+#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3366_GRF_SOC_CON7 */
+#define RK3366_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
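These GRF_BIT()/GRF_CLR_BIT() values follow the Rockchip GRF convention:
the upper 16 bits of each register write act as a write-enable mask, so
single bits can be set or cleared without a read-modify-write. A standalone
sketch, assuming the HIWORD_UPDATE() definition already used in dwmac-rk.c:

#include <stdio.h>

/* High half selects which low-half bits the write actually changes. */
#define HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))
#define GRF_BIT(nr)	HIWORD_UPDATE(1, 1, nr)
#define GRF_CLR_BIT(nr)	HIWORD_UPDATE(0, 1, nr)

int main(void)
{
	/* RK3366_GMAC_SPEED_100M is GRF_BIT(7): set bit 7, touch only bit 7 */
	printf("GRF_BIT(7)     = 0x%08x\n", GRF_BIT(7));	/* 0x00800080 */
	printf("GRF_CLR_BIT(7) = 0x%08x\n", GRF_CLR_BIT(7));	/* 0x00800000 */
	return 0;
}
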
+static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
+ RK3366_GMAC_RXCLK_DLY_ENABLE |
+ RK3366_GMAC_TXCLK_DLY_ENABLE |
+ RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+}
+
+static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_RMII_CLK_2_5M |
+ RK3366_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_RMII_CLK_25M |
+ RK3366_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3366_ops = {
+ .set_to_rgmii = rk3366_set_to_rgmii,
+ .set_to_rmii = rk3366_set_to_rmii,
+ .set_rgmii_speed = rk3366_set_rgmii_speed,
+ .set_rmii_speed = rk3366_set_rmii_speed,
+};
+
#define RK3368_GRF_SOC_CON15 0x043c
#define RK3368_GRF_SOC_CON16 0x0440
@@ -413,6 +526,118 @@ static const struct rk_gmac_ops rk3368_ops = {
.set_rmii_speed = rk3368_set_rmii_speed,
};
+#define RK3399_GRF_SOC_CON5 0xc214
+#define RK3399_GRF_SOC_CON6 0xc218
+
+/* RK3399_GRF_SOC_CON5 */
+#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3399_GRF_SOC_CON6 */
+#define RK3399_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
+ RK3399_GMAC_RXCLK_DLY_ENABLE |
+ RK3399_GMAC_TXCLK_DLY_ENABLE |
+ RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+}
+
+static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_RMII_CLK_2_5M |
+ RK3399_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_RMII_CLK_25M |
+ RK3399_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
+ .set_to_rmii = rk3399_set_to_rmii,
+ .set_rgmii_speed = rk3399_set_rgmii_speed,
+ .set_rmii_speed = rk3399_set_rmii_speed,
+};
+
static int gmac_clk_init(struct rk_priv_data *bsp_priv)
{
struct device *dev = &bsp_priv->pdev->dev;
@@ -629,6 +854,16 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
"rockchip,grf");
bsp_priv->pdev = pdev;
+ gmac_clk_init(bsp_priv);
+
+ return bsp_priv;
+}
+
+static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
+{
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
/* rmii or rgmii */
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
dev_info(dev, "init for RGMII\n");
@@ -641,15 +876,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
dev_err(dev, "NO interface defined!\n");
}
- gmac_clk_init(bsp_priv);
-
- return bsp_priv;
-}
-
-static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
-{
- int ret;
-
ret = phy_power_on(bsp_priv, true);
if (ret)
return ret;
@@ -658,11 +884,19 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
if (ret)
return ret;
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
+ struct device *dev = &gmac->pdev->dev;
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
@@ -760,7 +994,9 @@ static int rk_gmac_probe(struct platform_device *pdev)
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
+ { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
new file mode 100644
index 000000000000..e5a926b8bee7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -0,0 +1,194 @@
+/*
+ * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
+ *
+ * Copyright (C) Alexandre Torgue 2015
+ * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define MII_PHY_SEL_MASK BIT(23)
+
+struct stm32_dwmac {
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+ u32 mode_reg; /* MAC glue-logic mode register */
+ struct regmap *regmap;
+ u32 speed;
+};
+
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ u32 val;
+ int ret;
+
+ val = (plat_dat->interface == PHY_INTERFACE_MODE_MII) ? 0 : 1;
+ ret = regmap_update_bits(dwmac->regmap, reg, MII_PHY_SEL_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk_tx);
+
+ return ret;
+}
+
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_rx);
+}
+
+static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int err;
+
+ /* Get TX/RX clocks */
+ dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx");
+ if (IS_ERR(dwmac->clk_tx)) {
+ dev_err(dev, "No tx clock provided...\n");
+ return PTR_ERR(dwmac->clk_tx);
+ }
+ dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx");
+ if (IS_ERR(dwmac->clk_rx)) {
+ dev_err(dev, "No rx clock provided...\n");
+ return PTR_ERR(dwmac->clk_rx);
+ }
+
+ /* Get mode register */
+ dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(dwmac->regmap))
+ return PTR_ERR(dwmac->regmap);
+
+ err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
+ if (err)
+ dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
+
+ return err;
+}
+
+static int stm32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct stm32_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return ret;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+
+ ret = stm32_dwmac_init(plat_dat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ stm32_dwmac_clk_disable(dwmac);
+
+ return ret;
+}
+
+static int stm32_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dwmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+
+ return ret;
+}
+
+static int stm32_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = stm32_dwmac_init(priv->plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_resume(dev);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
+ stm32_dwmac_suspend, stm32_dwmac_resume);
+
+static const struct of_device_id stm32_dwmac_match[] = {
+ { .compatible = "st,stm32-dwmac"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
+
+static struct platform_driver stm32_dwmac_driver = {
+ .probe = stm32_dwmac_probe,
+ .remove = stm32_dwmac_remove,
+ .driver = {
+ .name = "stm32-dwmac",
+ .pm = &stm32_dwmac_pm_ops,
+ .of_match_table = stm32_dwmac_match,
+ },
+};
+module_platform_driver(stm32_dwmac_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics MCU DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 885a5e64519d..7df4ff158f3d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -145,7 +145,7 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
numhashregs = 8;
break;
default:
- pr_debug("STMMAC: err in setting mulitcast filter\n");
+ pr_debug("STMMAC: err in setting multicast filter\n");
return;
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 756bb548e81a..0a0d6a86f397 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -265,6 +265,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
* once needed on other platforms.
*/
if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.50a") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
of_device_is_compatible(np, "snps,dwmac")) {
/* Note that the max-frame-size parameter as defined in the
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 170a18b61281..6e3b82972ce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -187,7 +187,7 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
if (IS_ERR(priv->ptp_clock)) {
priv->ptp_clock = NULL;
pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
- } else
+ } else if (priv->ptp_clock)
pr_debug("Added PTP HW clock successfully on %s\n",
priv->dev->name);
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 4490ebaed127..0d0053128542 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -2743,7 +2743,7 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
lp->msg_enable = msglevel;
}
-static struct ethtool_ops dwceqos_ethtool_ops = {
+static const struct ethtool_ops dwceqos_ethtool_ops = {
.get_drvinfo = dwceqos_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_pauseparam = dwceqos_get_pauseparam,
@@ -2761,7 +2761,7 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static struct net_device_ops netdev_ops = {
+static const struct net_device_ops netdev_ops = {
.ndo_open = dwceqos_open,
.ndo_stop = dwceqos_stop,
.ndo_start_xmit = dwceqos_start_xmit,
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index d300d536d06f..fa0cfda24fd9 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -546,7 +546,8 @@ fatal_error:
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int queue, len;
+ int queue;
+ unsigned int len;
struct cpmac_desc *desc;
struct cpmac_priv *priv = netdev_priv(dev);
@@ -556,7 +557,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_padto(skb, ETH_ZLEN)))
return NETDEV_TX_OK;
- len = max(skb->len, ETH_ZLEN);
+ len = max_t(unsigned int, skb->len, ETH_ZLEN);
queue = skb_get_queue_mapping(skb);
netif_stop_subqueue(dev, queue);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f85d605e4560..c6cff3d2ff05 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@ do { \
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x76543210
+#define CPDMA_TX_PRIORITY_MAP 0x01234567
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_ALE_VLAN_AWARE 1
@@ -140,9 +140,11 @@ do { \
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-#define cpsw_slave_index(priv) \
- ((priv->data.dual_emac) ? priv->emac_port : \
- priv->data.active_slave)
+#define cpsw_slave_index(cpsw, priv) \
+ ((cpsw->data.dual_emac) ? priv->emac_port : \
+ cpsw->data.active_slave)
+#define IRQ_NUM 2
+#define CPSW_MAX_QUEUES 8
static int debug_level;
module_param(debug_level, int, 0);
@@ -363,38 +365,41 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
__raw_writel(val, slave->regs + offset);
}
-struct cpsw_priv {
- struct platform_device *pdev;
- struct net_device *ndev;
- struct napi_struct napi_rx;
- struct napi_struct napi_tx;
+struct cpsw_common {
struct device *dev;
struct cpsw_platform_data data;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct cpsw_ss_regs __iomem *regs;
struct cpsw_wr_regs __iomem *wr_regs;
u8 __iomem *hw_stats;
struct cpsw_host_regs __iomem *host_port_regs;
- u32 msg_enable;
u32 version;
u32 coal_intvl;
u32 bus_freq_mhz;
int rx_packet_max;
- struct clk *clk;
- u8 mac_addr[ETH_ALEN];
struct cpsw_slave *slaves;
struct cpdma_ctlr *dma;
- struct cpdma_chan *txch, *rxch;
+ struct cpdma_chan *txch[CPSW_MAX_QUEUES];
+ struct cpdma_chan *rxch[CPSW_MAX_QUEUES];
struct cpsw_ale *ale;
- bool rx_pause;
- bool tx_pause;
bool quirk_irq;
bool rx_irq_disabled;
bool tx_irq_disabled;
- /* snapshot of IRQ numbers */
- u32 irqs_table[4];
- u32 num_irqs;
- struct cpts *cpts;
+ u32 irqs_table[IRQ_NUM];
+ struct cpts *cpts;
+ int rx_ch_num, tx_ch_num;
+};
+
+struct cpsw_priv {
+ struct net_device *ndev;
+ struct device *dev;
+ u32 msg_enable;
+ u8 mac_addr[ETH_ALEN];
+ bool rx_pause;
+ bool tx_pause;
u32 emac_port;
+ struct cpsw_common *cpsw;
};
struct cpsw_stats {
@@ -455,108 +460,92 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
- { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
- { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
- { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
- { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
- { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
- { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
- { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
- { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
- { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
- { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
- { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
- { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
- { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
- { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
- { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
- { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
- { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
- { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
- { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
- { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
- { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
- { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
- { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
- { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
- { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
- { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
};
-#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+ { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "misqueued", CPDMA_RX_STAT(misqueued) },
+ { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "requeue", CPDMA_RX_STAT(requeue) },
+ { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
-#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
+#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
+
+#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
+#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
+ struct cpsw_common *cpsw = (priv)->cpsw; \
int n; \
- if (priv->data.dual_emac) \
- (func)((priv)->slaves + priv->emac_port, ##arg);\
+ if (cpsw->data.dual_emac) \
+ (func)((cpsw)->slaves + priv->emac_port, ##arg);\
else \
- for (n = (priv)->data.slaves, \
- slave = (priv)->slaves; \
+ for (n = cpsw->data.slaves, \
+ slave = cpsw->slaves; \
n; n--) \
(func)(slave++, ##arg); \
} while (0)
-#define cpsw_get_slave_ndev(priv, __slave_no__) \
- ((__slave_no__ < priv->data.slaves) ? \
- priv->slaves[__slave_no__].ndev : NULL)
-#define cpsw_get_slave_priv(priv, __slave_no__) \
- (((__slave_no__ < priv->data.slaves) && \
- (priv->slaves[__slave_no__].ndev)) ? \
- netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
-
-#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
+
+#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \
do { \
- if (!priv->data.dual_emac) \
+ if (!cpsw->data.dual_emac) \
break; \
if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
- ndev = cpsw_get_slave_ndev(priv, 0); \
- priv = netdev_priv(ndev); \
+ ndev = cpsw->slaves[0].ndev; \
skb->dev = ndev; \
} else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
- ndev = cpsw_get_slave_ndev(priv, 1); \
- priv = netdev_priv(ndev); \
+ ndev = cpsw->slaves[1].ndev; \
skb->dev = ndev; \
} \
} while (0)
-#define cpsw_add_mcast(priv, addr) \
+#define cpsw_add_mcast(cpsw, priv, addr) \
do { \
- if (priv->data.dual_emac) { \
- struct cpsw_slave *slave = priv->slaves + \
+ if (cpsw->data.dual_emac) { \
+ struct cpsw_slave *slave = cpsw->slaves + \
priv->emac_port; \
- int slave_port = cpsw_get_slave_port(priv, \
+ int slave_port = cpsw_get_slave_port( \
slave->slave_num); \
- cpsw_ale_add_mcast(priv->ale, addr, \
+ cpsw_ale_add_mcast(cpsw->ale, addr, \
1 << slave_port | ALE_PORT_HOST, \
ALE_VLAN, slave->port_vlan, 0); \
} else { \
- cpsw_ale_add_mcast(priv->ale, addr, \
+ cpsw_ale_add_mcast(cpsw->ale, addr, \
ALE_ALL_PORTS, \
0, 0, 0); \
} \
} while (0)
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_ale *ale = priv->ale;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_ale *ale = cpsw->ale;
int i;
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
bool flag = false;
/* Enabling promiscuous mode for one interface will be
* common to both interfaces, as they share the same
* hardware resource.
*/
- for (i = 0; i < priv->data.slaves; i++)
- if (priv->slaves[i].ndev->flags & IFF_PROMISC)
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
flag = true;
if (!enable && flag) {
@@ -579,7 +568,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
unsigned long timeout = jiffies + HZ;
/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i <= cpsw->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
@@ -606,7 +595,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i <= cpsw->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
@@ -620,17 +609,18 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int vid;
- if (priv->data.dual_emac)
- vid = priv->slaves[priv->emac_port].port_vlan;
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
else
- vid = priv->data.default_vlan;
+ vid = cpsw->data.default_vlan;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
- cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
return;
} else {
/* Disable promiscuous mode */
@@ -638,51 +628,54 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
+ cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(priv, (u8 *)ha->addr);
+ cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
}
}
}
-static void cpsw_intr_enable(struct cpsw_priv *priv)
+static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
- __raw_writel(0xFF, &priv->wr_regs->tx_en);
- __raw_writel(0xFF, &priv->wr_regs->rx_en);
+ __raw_writel(0xFF, &cpsw->wr_regs->tx_en);
+ __raw_writel(0xFF, &cpsw->wr_regs->rx_en);
- cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpdma_ctlr_int_ctrl(cpsw->dma, true);
return;
}
-static void cpsw_intr_disable(struct cpsw_priv *priv)
+static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
- __raw_writel(0, &priv->wr_regs->tx_en);
- __raw_writel(0, &priv->wr_regs->rx_en);
+ __raw_writel(0, &cpsw->wr_regs->tx_en);
+ __raw_writel(0, &cpsw->wr_regs->rx_en);
- cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_int_ctrl(cpsw->dma, false);
return;
}
static void cpsw_tx_handler(void *token, int len, int status)
{
+ struct netdev_queue *txq;
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Check whether the queue is stopped due to stalled tx dma; if the
* queue is stopped then restart it, as we now have free descriptors
*/
- if (unlikely(netif_queue_stopped(ndev)))
- netif_wake_queue(ndev);
- cpts_tx_timestamp(priv->cpts, skb);
+ txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+ if (unlikely(netif_tx_queue_stopped(txq)))
+ netif_tx_wake_queue(txq);
+
+ cpts_tx_timestamp(cpsw->cpts, skb);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -690,22 +683,23 @@ static void cpsw_tx_handler(void *token, int len, int status)
static void cpsw_rx_handler(void *token, int len, int status)
{
+ struct cpdma_chan *ch;
struct sk_buff *skb = token;
struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
- struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+ cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
bool ndev_status = false;
- struct cpsw_slave *slave = priv->slaves;
+ struct cpsw_slave *slave = cpsw->slaves;
int n;
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
/* In dual emac mode check for all interfaces */
- for (n = priv->data.slaves; n; n--, slave++)
+ for (n = cpsw->data.slaves; n; n--, slave++)
if (netif_running(slave->ndev))
ndev_status = true;
}
@@ -726,10 +720,11 @@ static void cpsw_rx_handler(void *token, int len, int status)
return;
}
- new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
+ new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
if (new_skb) {
+ skb_copy_queue_mapping(new_skb, skb);
skb_put(skb, len);
- cpts_rx_timestamp(priv->cpts, skb);
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -741,83 +736,117 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
requeue:
- ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
- skb_tailroom(new_skb), 0);
+ if (netif_dormant(ndev)) {
+ dev_kfree_skb_any(new_skb);
+ return;
+ }
+
+ ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
+ ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
+ skb_tailroom(new_skb), 0);
if (WARN_ON(ret < 0))
dev_kfree_skb_any(new_skb);
}
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
- struct cpsw_priv *priv = dev_id;
+ struct cpsw_common *cpsw = dev_id;
- writel(0, &priv->wr_regs->tx_en);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ writel(0, &cpsw->wr_regs->tx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
- if (priv->quirk_irq) {
- disable_irq_nosync(priv->irqs_table[1]);
- priv->tx_irq_disabled = true;
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
}
- napi_schedule(&priv->napi_tx);
+ napi_schedule(&cpsw->napi_tx);
return IRQ_HANDLED;
}
static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
- struct cpsw_priv *priv = dev_id;
+ struct cpsw_common *cpsw = dev_id;
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- writel(0, &priv->wr_regs->rx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ writel(0, &cpsw->wr_regs->rx_en);
- if (priv->quirk_irq) {
- disable_irq_nosync(priv->irqs_table[0]);
- priv->rx_irq_disabled = true;
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
}
- napi_schedule(&priv->napi_rx);
+ napi_schedule(&cpsw->napi_rx);
return IRQ_HANDLED;
}
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi_tx);
- int num_tx;
+ u32 ch_map;
+ int num_tx, ch;
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ for (ch = 0, num_tx = 0; num_tx < budget; ch_map >>= 1, ch++) {
+ if (!ch_map) {
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ if (!ch_map)
+ break;
+
+ ch = 0;
+ }
+
+ if (!(ch_map & 0x01))
+ continue;
+
+ num_tx += cpdma_chan_process(cpsw->txch[ch], budget - num_tx);
+ }
- num_tx = cpdma_chan_process(priv->txch, budget);
if (num_tx < budget) {
napi_complete(napi_tx);
- writel(0xff, &priv->wr_regs->tx_en);
- if (priv->quirk_irq && priv->tx_irq_disabled) {
- priv->tx_irq_disabled = false;
- enable_irq(priv->irqs_table[1]);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
}
}
- if (num_tx)
- cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
-
return num_tx;
}
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi_rx);
- int num_rx;
+ u32 ch_map;
+ int num_rx, ch;
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ for (ch = 0, num_rx = 0; num_rx < budget; ch_map >>= 1, ch++) {
+ if (!ch_map) {
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ if (!ch_map)
+ break;
+
+ ch = 0;
+ }
+
+ if (!(ch_map & 0x01))
+ continue;
+
+ num_rx += cpdma_chan_process(cpsw->rxch[ch], budget - num_rx);
+ }
- num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
napi_complete(napi_rx);
- writel(0xff, &priv->wr_regs->rx_en);
- if (priv->quirk_irq && priv->rx_irq_disabled) {
- priv->rx_irq_disabled = false;
- enable_irq(priv->irqs_table[0]);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
}
}
- if (num_rx)
- cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
-
return num_rx;
}
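
The reworked tx/rx poll loops walk a channel bitmap, re-reading it when it
runs out, until the budget is spent or no channel has pending work. A
standalone sketch of the same bit-walk, with cpdma_chan_process() stubbed
out and made-up packet counts:

#include <stdio.h>

static int pending[8] = { 3, 0, 5, 0, 0, 2, 0, 0 };	/* fake per-channel work */

static unsigned int chans_state(void)
{
	unsigned int map = 0;
	int ch;

	for (ch = 0; ch < 8; ch++)
		if (pending[ch])
			map |= 1u << ch;
	return map;
}

static int chan_process(int ch, int quota)
{
	int n = pending[ch] < quota ? pending[ch] : quota;

	pending[ch] -= n;
	return n;
}

int main(void)
{
	int budget = 64, num = 0, ch;
	unsigned int ch_map = chans_state();

	for (ch = 0; num < budget; ch_map >>= 1, ch++) {
		if (!ch_map) {		/* wrapped: re-read the channel map */
			ch_map = chans_state();
			if (!ch_map)
				break;
			ch = 0;
		}
		if (!(ch_map & 0x01))
			continue;
		num += chan_process(ch, budget - num);
	}
	printf("processed %d packets\n", num);	/* 10 */
	return 0;
}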
@@ -850,17 +879,18 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct phy_device *phy = slave->phy;
u32 mac_control = 0;
u32 slave_port;
+ struct cpsw_common *cpsw = priv->cpsw;
if (!phy)
return;
- slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ slave_port = cpsw_get_slave_port(slave->slave_num);
if (phy->link) {
- mac_control = priv->data.mac_control;
+ mac_control = cpsw->data.mac_control;
/* enable forwarding */
- cpsw_ale_control_set(priv->ale, slave_port,
+ cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
if (phy->speed == 1000)
@@ -884,7 +914,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
} else {
mac_control = 0;
/* disable forwarding */
- cpsw_ale_control_set(priv->ale, slave_port,
+ cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
}
@@ -906,19 +936,19 @@ static void cpsw_adjust_link(struct net_device *ndev)
if (link) {
netif_carrier_on(ndev);
if (netif_running(ndev))
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
} else {
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
+ netif_tx_stop_all_queues(ndev);
}
}
static int cpsw_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- coal->rx_coalesce_usecs = priv->coal_intvl;
+ coal->rx_coalesce_usecs = cpsw->coal_intvl;
return 0;
}
@@ -931,11 +961,12 @@ static int cpsw_set_coalesce(struct net_device *ndev,
u32 prescale = 0;
u32 addnl_dvdr = 1;
u32 coal_intvl = 0;
+ struct cpsw_common *cpsw = priv->cpsw;
coal_intvl = coal->rx_coalesce_usecs;
- int_ctrl = readl(&priv->wr_regs->int_control);
- prescale = priv->bus_freq_mhz * 4;
+ int_ctrl = readl(&cpsw->wr_regs->int_control);
+ prescale = cpsw->bus_freq_mhz * 4;
if (!coal->rx_coalesce_usecs) {
int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
@@ -963,53 +994,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
}
num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
- writel(num_interrupts, &priv->wr_regs->rx_imax);
- writel(num_interrupts, &priv->wr_regs->tx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->rx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->tx_imax);
int_ctrl |= CPSW_INTPACEEN;
int_ctrl &= (~CPSW_INTPRESCALE_MASK);
int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
update_return:
- writel(int_ctrl, &priv->wr_regs->int_control);
+ writel(int_ctrl, &cpsw->wr_regs->int_control);
cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
- if (priv->data.dual_emac) {
- int i;
-
- for (i = 0; i < priv->data.slaves; i++) {
- priv = netdev_priv(priv->slaves[i].ndev);
- priv->coal_intvl = coal_intvl;
- }
- } else {
- priv->coal_intvl = coal_intvl;
- }
+ cpsw->coal_intvl = coal_intvl;
return 0;
}
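
Worked example of the pacing arithmetic above, with hypothetical values: for coal->rx_coalesce_usecs = 250 and addnl_dvdr = 1, num_interrupts = (1000 * 1) / 250 = 4, so rx_imax/tx_imax pace the hardware to at most four interrupts per millisecond, and with bus_freq_mhz = 250 the prescaler written into int_control is 250 * 4 = 1000. The only behavioural change in the hunk is that the resulting coal_intvl is stored once in the shared cpsw_common rather than copied into every slave's cpsw_priv.
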
static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
switch (sset) {
case ETH_SS_STATS:
- return CPSW_STATS_LEN;
+ return (CPSW_STATS_COMMON_LEN +
+ (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+ CPSW_STATS_CH_LEN);
default:
return -EOPNOTSUPP;
}
}
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+ int ch_stats_len;
+ int line;
+ int i;
+
+ ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+ for (i = 0; i < ch_stats_len; i++) {
+ line = i % CPSW_STATS_CH_LEN;
+ snprintf(*p, ETH_GSTRING_LEN,
+ "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx",
+ i / CPSW_STATS_CH_LEN,
+ cpsw_gstrings_ch_stats[line].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
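
Assuming cpsw_gstrings_ch_stats carries the usual CPDMA counter names (the array is defined outside this hunk), the helper above emits one ethtool stat label per channel/counter pair, for example "Rx DMA chan 0: good_dequeue" or "Tx DMA chan 1: misqueued", which is what makes the new per-channel rows in ethtool -S output self-describing.
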
static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < CPSW_STATS_LEN; i++) {
+ for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
memcpy(p, cpsw_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+ cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
break;
}
}
@@ -1017,86 +1064,78 @@ static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
static void cpsw_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpdma_chan_stats rx_stats;
- struct cpdma_chan_stats tx_stats;
- u32 val;
u8 *p;
- int i;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpdma_chan_stats ch_stats;
+ int i, l, ch;
/* Collect Davinci CPDMA stats for Rx and Tx Channel */
- cpdma_chan_get_stats(priv->rxch, &rx_stats);
- cpdma_chan_get_stats(priv->txch, &tx_stats);
-
- for (i = 0; i < CPSW_STATS_LEN; i++) {
- switch (cpsw_gstrings_stats[i].type) {
- case CPSW_STATS:
- val = readl(priv->hw_stats +
- cpsw_gstrings_stats[i].stat_offset);
- data[i] = val;
- break;
-
- case CPDMA_RX_STATS:
- p = (u8 *)&rx_stats +
- cpsw_gstrings_stats[i].stat_offset;
- data[i] = *(u32 *)p;
- break;
+ for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+ data[l] = readl(cpsw->hw_stats +
+ cpsw_gstrings_stats[l].stat_offset);
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->rxch[ch], &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
- case CPDMA_TX_STATS:
- p = (u8 *)&tx_stats +
- cpsw_gstrings_stats[i].stat_offset;
- data[i] = *(u32 *)p;
- break;
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->txch[ch], &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
}
}
}
-static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
{
u32 i;
u32 usage_count = 0;
- if (!priv->data.dual_emac)
+ if (!cpsw->data.dual_emac)
return 0;
- for (i = 0; i < priv->data.slaves; i++)
- if (priv->slaves[i].open_stat)
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].open_stat)
usage_count++;
return usage_count;
}
-static inline int cpsw_tx_packet_submit(struct net_device *ndev,
- struct cpsw_priv *priv, struct sk_buff *skb)
+static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
+ struct sk_buff *skb,
+ struct cpdma_chan *txch)
{
- if (!priv->data.dual_emac)
- return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 0);
+ struct cpsw_common *cpsw = priv->cpsw;
- if (ndev == cpsw_get_slave_ndev(priv, 0))
- return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 1);
- else
- return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 2);
+ return cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port + cpsw->data.dual_emac);
}
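
The folded expression priv->emac_port + cpsw->data.dual_emac reproduces the removed three-way branch: in switch mode (dual_emac == 0, emac_port == 0) the directed-port argument is 0, i.e. undirected and left to the ALE, while in dual-EMAC mode slave 0 and slave 1 submit to ports 1 and 2 respectively.
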
static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
{
+ struct cpsw_common *cpsw = priv->cpsw;
u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
- if (priv->version == CPSW_VERSION_1)
+ if (cpsw->version == CPSW_VERSION_1)
slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
else
slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
- cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
port_mask, port_mask, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1110,13 +1149,14 @@ static void soft_reset_slave(struct cpsw_slave *slave)
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
u32 slave_port;
+ struct cpsw_common *cpsw = priv->cpsw;
soft_reset_slave(slave);
/* setup priority mapping */
__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
break;
@@ -1128,17 +1168,17 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
+ __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
- slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ slave_port = cpsw_get_slave_port(slave->slave_num);
- if (priv->data.dual_emac)
+ if (cpsw->data.dual_emac)
cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
else
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
if (slave->data->phy_node) {
@@ -1168,81 +1208,121 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
phy_start(slave->phy);
/* Configure GMII_SEL register */
- cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
+ cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
- const int vlan = priv->data.default_vlan;
+ struct cpsw_common *cpsw = priv->cpsw;
+ const int vlan = cpsw->data.default_vlan;
u32 reg;
int i;
int unreg_mcast_mask;
- reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
CPSW2_PORT_VLAN;
- writel(vlan, &priv->host_port_regs->port_vlan);
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
- for (i = 0; i < priv->data.slaves; i++)
- slave_write(priv->slaves + i, vlan, reg);
+ for (i = 0; i < cpsw->data.slaves; i++)
+ slave_write(cpsw->slaves + i, vlan, reg);
if (priv->ndev->flags & IFF_ALLMULTI)
unreg_mcast_mask = ALE_ALL_PORTS;
else
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
- cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
ALE_ALL_PORTS, ALE_ALL_PORTS,
unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
- u32 control_reg;
u32 fifo_mode;
+ u32 control_reg;
+ struct cpsw_common *cpsw = priv->cpsw;
/* soft reset the controller and initialize ale */
- soft_reset("cpsw", &priv->regs->soft_reset);
- cpsw_ale_start(priv->ale);
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
- control_reg = readl(&priv->regs->control);
+ control_reg = readl(&cpsw->regs->control);
control_reg |= CPSW_VLAN_AWARE;
- writel(control_reg, &priv->regs->control);
- fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ writel(control_reg, &cpsw->regs->control);
+ fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
CPSW_FIFO_NORMAL_MODE;
- writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
+ writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
__raw_writel(CPDMA_TX_PRIORITY_MAP,
- &priv->host_port_regs->cpdma_tx_pri_map);
- __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
- cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
- if (!priv->data.dual_emac) {
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
+ if (!cpsw->data.dual_emac) {
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
0, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
}
}
-static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
+static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct sk_buff *skb;
+ int ch_buf_num;
+ int ch, i, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
+ for (i = 0; i < ch_buf_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(priv->ndev,
+ cpsw->rx_packet_max,
+ GFP_KERNEL);
+ if (!skb) {
+ cpsw_err(priv, ifup, "cannot allocate skb\n");
+ return -ENOMEM;
+ }
+
+ skb_set_queue_mapping(skb, ch);
+ ret = cpdma_chan_submit(cpsw->rxch[ch], skb, skb->data,
+ skb_tailroom(skb), 0);
+ if (ret < 0) {
+ cpsw_err(priv, ifup,
+ "cannot submit skb to channel %d rx, error %d\n",
+ ch, ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ kmemleak_not_leak(skb);
+ }
+
+ cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+ ch, ch_buf_num);
+ }
+
+ return 0;
+}
+
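
A worked example of the prefill above, assuming the descriptor split introduced in davinci_cpdma.c further down: a 256-descriptor pool shared by one tx and one rx channel gives cpdma_chan_get_rx_buf_num() = 256 / 2 = 128, matching the old global num_desc / 2 behaviour; with one tx and two rx channels each rx ring is instead prefilled with 256 / 3 = 85 skbs, every one tagged via skb_set_queue_mapping() so its completion can be attributed to the right channel.
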
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
u32 slave_port;
- slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ slave_port = cpsw_get_slave_port(slave->slave_num);
if (!slave->phy)
return;
phy_stop(slave->phy);
phy_disconnect(slave->phy);
slave->phy = NULL;
- cpsw_ale_control_set(priv->ale, slave_port,
+ cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
soft_reset_slave(slave);
}
@@ -1250,115 +1330,111 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int i, ret;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
u32 reg;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (!cpsw_common_res_usage_state(priv))
- cpsw_intr_disable(priv);
+ if (!cpsw_common_res_usage_state(cpsw))
+ cpsw_intr_disable(cpsw);
netif_carrier_off(ndev);
- reg = priv->version;
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err_cleanup;
+ }
+
+ reg = cpsw->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
CPSW_RTL_VERSION(reg));
/* initialize host and slave ports */
- if (!cpsw_common_res_usage_state(priv))
+ if (!cpsw_common_res_usage_state(cpsw))
cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
/* Add default VLAN */
- if (!priv->data.dual_emac)
+ if (!cpsw->data.dual_emac)
cpsw_add_default_vlan(priv);
else
- cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
+ cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
- if (!cpsw_common_res_usage_state(priv)) {
- struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
- int buf_num;
-
+ if (!cpsw_common_res_usage_state(cpsw)) {
/* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+ cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0);
/* disable priority elevation */
- __raw_writel(0, &priv->regs->ptype);
+ __raw_writel(0, &cpsw->regs->ptype);
/* enable statistics collection only on all ports */
- __raw_writel(0x7, &priv->regs->stat_port_en);
+ __raw_writel(0x7, &cpsw->regs->stat_port_en);
/* Enable internal fifo flow control */
- writel(0x7, &priv->regs->flow_control);
+ writel(0x7, &cpsw->regs->flow_control);
- napi_enable(&priv_sl0->napi_rx);
- napi_enable(&priv_sl0->napi_tx);
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
- if (priv_sl0->tx_irq_disabled) {
- priv_sl0->tx_irq_disabled = false;
- enable_irq(priv->irqs_table[1]);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
}
- if (priv_sl0->rx_irq_disabled) {
- priv_sl0->rx_irq_disabled = false;
- enable_irq(priv->irqs_table[0]);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
}
- buf_num = cpdma_chan_get_rx_buf_num(priv->dma);
- for (i = 0; i < buf_num; i++) {
- struct sk_buff *skb;
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
- ret = -ENOMEM;
- skb = __netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max, GFP_KERNEL);
- if (!skb)
- goto err_cleanup;
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), 0);
- if (ret < 0) {
- kfree_skb(skb);
- goto err_cleanup;
- }
- kmemleak_not_leak(skb);
- }
- /* continue even if we didn't manage to submit all
- * receive descs
- */
- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
-
- if (cpts_register(&priv->pdev->dev, priv->cpts,
- priv->data.cpts_clock_mult,
- priv->data.cpts_clock_shift))
+ if (cpts_register(cpsw->dev, cpsw->cpts,
+ cpsw->data.cpts_clock_mult,
+ cpsw->data.cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
}
/* Enable Interrupt pacing if configured */
- if (priv->coal_intvl != 0) {
+ if (cpsw->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = priv->coal_intvl;
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
- cpdma_ctlr_start(priv->dma);
- cpsw_intr_enable(priv);
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+
+ if (cpsw->data.dual_emac)
+ cpsw->slaves[priv->emac_port].open_stat = true;
+
+ netif_tx_start_all_queues(ndev);
- if (priv->data.dual_emac)
- priv->slaves[priv->emac_port].open_stat = true;
return 0;
err_cleanup:
- cpdma_ctlr_stop(priv->dma);
- for_each_slave(priv, cpsw_slave_stop, priv);
- pm_runtime_put_sync(&priv->pdev->dev);
+ cpdma_ctlr_stop(cpsw->dma);
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ pm_runtime_put_sync(cpsw->dev);
netif_carrier_off(priv->ndev);
return ret;
}
@@ -1366,25 +1442,24 @@ err_cleanup:
static int cpsw_ndo_stop(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- netif_stop_queue(priv->ndev);
+ netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
- if (cpsw_common_res_usage_state(priv) <= 1) {
- struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
-
- napi_disable(&priv_sl0->napi_rx);
- napi_disable(&priv_sl0->napi_tx);
- cpts_unregister(priv->cpts);
- cpsw_intr_disable(priv);
- cpdma_ctlr_stop(priv->dma);
- cpsw_ale_stop(priv->ale);
- }
- for_each_slave(priv, cpsw_slave_stop, priv);
- pm_runtime_put_sync(&priv->pdev->dev);
- if (priv->data.dual_emac)
- priv->slaves[priv->emac_port].open_stat = false;
+ if (cpsw_common_res_usage_state(cpsw) <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ }
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ pm_runtime_put_sync(cpsw->dev);
+ if (cpsw->data.dual_emac)
+ cpsw->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -1392,7 +1467,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int ret;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
netif_trans_update(ndev);
@@ -1403,12 +1481,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->cpts->tx_enable)
+ cpsw->cpts->tx_enable)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
- ret = cpsw_tx_packet_submit(ndev, priv, skb);
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txch[q_idx];
+ ret = cpsw_tx_packet_submit(priv, skb, txch);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
@@ -1417,24 +1500,27 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
/* If there is no more tx desc left free then we need to
* tell the kernel to stop sending us tx frames.
*/
- if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
- netif_stop_queue(ndev);
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ netif_tx_stop_queue(txq);
+ }
return NETDEV_TX_OK;
fail:
ndev->stats.tx_dropped++;
- netif_stop_queue(ndev);
+ txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+ netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
}
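
The modulo clamp on q_idx is defensive: the stack selects queues against real_num_tx_queues, which cpsw_ndo_open() keeps equal to tx_ch_num, but a stale skb queue mapping (for instance one recorded before an ethtool -L shrink) could still exceed it; a mapping of 5 with tx_ch_num = 4 folds to channel 1. Flow control is likewise per-queue now: only the netdev_queue backing the exhausted cpdma channel is stopped, instead of the whole device.
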
#ifdef CONFIG_TI_CPTS
-static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
+ struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
+ if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -1442,10 +1528,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (priv->cpts->tx_enable)
+ if (cpsw->cpts->tx_enable)
ts_en |= CPSW_V1_TS_TX_EN;
- if (priv->cpts->rx_enable)
+ if (cpsw->cpts->rx_enable)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -1455,32 +1541,33 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
struct cpsw_slave *slave;
+ struct cpsw_common *cpsw = priv->cpsw;
u32 ctrl, mtype;
- if (priv->data.dual_emac)
- slave = &priv->slaves[priv->emac_port];
+ if (cpsw->data.dual_emac)
+ slave = &cpsw->slaves[priv->emac_port];
else
- slave = &priv->slaves[priv->data.active_slave];
+ slave = &cpsw->slaves[cpsw->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
+ if (cpsw->cpts->tx_enable)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (priv->cpts->rx_enable)
+ if (cpsw->cpts->rx_enable)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
+ if (cpsw->cpts->tx_enable)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (priv->cpts->rx_enable)
+ if (cpsw->cpts->rx_enable)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -1489,18 +1576,19 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
- __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
+ __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
- if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2 &&
- priv->version != CPSW_VERSION_3)
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1540,9 +1628,9 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(priv);
+ cpsw_hwtstamp_v1(cpsw);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -1557,13 +1645,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = priv->cpts;
+ struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+ struct cpts *cpts = cpsw->cpts;
struct hwtstamp_config cfg;
- if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2 &&
- priv->version != CPSW_VERSION_3)
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
cfg.flags = 0;
@@ -1579,7 +1667,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct cpsw_priv *priv = netdev_priv(dev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
if (!netif_running(dev))
return -EINVAL;
@@ -1593,27 +1682,33 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
#endif
}
- if (!priv->slaves[slave_no].phy)
+ if (!cpsw->slaves[slave_no].phy)
return -EOPNOTSUPP;
- return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
+ return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ch;
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
ndev->stats.tx_errors++;
- cpsw_intr_disable(priv);
- cpdma_chan_stop(priv->txch);
- cpdma_chan_start(priv->txch);
- cpsw_intr_enable(priv);
+ cpsw_intr_disable(cpsw);
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_stop(cpsw->txch[ch]);
+ cpdma_chan_start(cpsw->txch[ch]);
+ }
+
+ cpsw_intr_enable(cpsw);
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_common *cpsw = priv->cpsw;
int flags = 0;
u16 vid = 0;
int ret;
@@ -1621,27 +1716,27 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (priv->data.dual_emac) {
- vid = priv->slaves[priv->emac_port].port_vlan;
+ if (cpsw->data.dual_emac) {
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
flags = ALE_VLAN;
}
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
flags, vid);
- cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
for_each_slave(priv, cpsw_set_slave_mac, priv);
- pm_runtime_put(&priv->pdev->dev);
+ pm_runtime_put(cpsw->dev);
return 0;
}
@@ -1649,12 +1744,12 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_intr_disable(priv);
- cpsw_rx_interrupt(priv->irqs_table[0], priv);
- cpsw_tx_interrupt(priv->irqs_table[1], priv);
- cpsw_intr_enable(priv);
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
}
#endif
@@ -1664,8 +1759,9 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
int ret;
int unreg_mcast_mask = 0;
u32 port_mask;
+ struct cpsw_common *cpsw = priv->cpsw;
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
if (priv->ndev->flags & IFF_ALLMULTI)
@@ -1679,27 +1775,27 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
}
- ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
unreg_mcast_mask);
if (ret != 0)
return ret;
- ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
goto clean_vid;
- ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
clean_vlan_ucast:
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
- cpsw_ale_del_vlan(priv->ale, vid, 0);
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
return ret;
}
@@ -1707,26 +1803,27 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int ret;
- if (vid == priv->data.default_vlan)
+ if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
* EMAC port separation
*/
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (vid == priv->slaves[i].port_vlan)
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (vid == cpsw->slaves[i].port_vlan)
return -EINVAL;
}
}
@@ -1734,7 +1831,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);
- pm_runtime_put(&priv->pdev->dev);
+ pm_runtime_put(cpsw->dev);
return ret;
}
@@ -1742,39 +1839,40 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int ret;
- if (vid == priv->data.default_vlan)
+ if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (vid == priv->slaves[i].port_vlan)
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (vid == cpsw->slaves[i].port_vlan)
return -EINVAL;
}
}
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
- ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
if (ret != 0)
return ret;
- ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
return ret;
- ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
- pm_runtime_put(&priv->pdev->dev);
+ pm_runtime_put(cpsw->dev);
return ret;
}
@@ -1797,31 +1895,32 @@ static const struct net_device_ops cpsw_netdev_ops = {
static int cpsw_get_regs_len(struct net_device *ndev)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+ return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}
static void cpsw_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
u32 *reg = p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* update CPSW IP version */
- regs->version = priv->version;
+ regs->version = cpsw->version;
- cpsw_ale_dump(priv->ale, reg);
+ cpsw_ale_dump(cpsw->ale, reg);
}
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev = to_platform_device(cpsw->dev);
strlcpy(info->driver, "cpsw", sizeof(info->driver));
strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1840,7 +1939,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
#ifdef CONFIG_TI_CPTS
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1849,7 +1948,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = priv->cpts->phc_index;
+ info->phc_index = cpsw->cpts->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1872,10 +1971,11 @@ static int cpsw_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
- if (priv->slaves[slave_no].phy)
- return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd);
else
return -EOPNOTSUPP;
}
@@ -1883,10 +1983,11 @@ static int cpsw_get_settings(struct net_device *ndev,
static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
- if (priv->slaves[slave_no].phy)
- return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd);
else
return -EOPNOTSUPP;
}
@@ -1894,22 +1995,24 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
wol->supported = 0;
wol->wolopts = 0;
- if (priv->slaves[slave_no].phy)
- phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+ if (cpsw->slaves[slave_no].phy)
+ phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}
static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
- if (priv->slaves[slave_no].phy)
- return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
else
return -EOPNOTSUPP;
}
@@ -1940,12 +2043,13 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int ret;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
}
return ret;
@@ -1956,11 +2060,185 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
- ret = pm_runtime_put(&priv->pdev->dev);
+ ret = pm_runtime_put(priv->cpsw->dev);
if (ret < 0)
cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}
+static void cpsw_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ch->max_combined = 0;
+ ch->max_rx = CPSW_MAX_QUEUES;
+ ch->max_tx = CPSW_MAX_QUEUES;
+ ch->max_other = 0;
+ ch->other_count = 0;
+ ch->rx_count = cpsw->rx_ch_num;
+ ch->tx_count = cpsw->tx_ch_num;
+ ch->combined_count = 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+ struct ethtool_channels *ch)
+{
+ if (ch->combined_count)
+ return -EINVAL;
+
+ /* verify we have at least one channel in each direction */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ if (ch->rx_count > cpsw->data.channels ||
+ ch->tx_count > cpsw->data.channels)
+ return -EINVAL;
+
+ return 0;
+}
+
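
By way of example (device name hypothetical): with cpsw->data.channels = 8, a request such as ethtool -L eth0 rx 2 tx 4 passes the checks above, while requests naming combined channels, a zero count in either direction, or more than 8 channels per direction are rejected with -EINVAL before any teardown starts.
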
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
+{
+ int (*poll)(struct napi_struct *, int);
+ struct cpsw_common *cpsw = priv->cpsw;
+ void (*handler)(void *, int, int);
+ struct cpdma_chan **chan;
+ int ret, *ch;
+
+ if (rx) {
+ ch = &cpsw->rx_ch_num;
+ chan = cpsw->rxch;
+ handler = cpsw_rx_handler;
+ poll = cpsw_rx_poll;
+ } else {
+ ch = &cpsw->tx_ch_num;
+ chan = cpsw->txch;
+ handler = cpsw_tx_handler;
+ poll = cpsw_tx_poll;
+ }
+
+ while (*ch < ch_num) {
+ chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+
+ if (IS_ERR(chan[*ch]))
+ return PTR_ERR(chan[*ch]);
+
+ if (!chan[*ch])
+ return -EINVAL;
+
+ cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ (*ch)++;
+ }
+
+ while (*ch > ch_num) {
+ (*ch)--;
+
+ ret = cpdma_chan_destroy(chan[*ch]);
+ if (ret)
+ return ret;
+
+ cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ }
+
+ return 0;
+}
+
+static int cpsw_update_channels(struct cpsw_priv *priv,
+ struct ethtool_channels *ch)
+{
+ int ret;
+
+ ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
+ if (ret)
+ return ret;
+
+ ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ /* Disable NAPI scheduling */
+ cpsw_intr_disable(cpsw);
+
+ /* Stop all transmit queues for every network device.
+ * Mark each interface dormant so rx descriptors are not re-queued
+ * while the channels are reconfigured.
+ */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ netif_tx_stop_all_queues(slave->ndev);
+ netif_dormant_on(slave->ndev);
+ }
+
+ /* Handle rest of tx packets and stop cpdma channels */
+ cpdma_ctlr_stop(cpsw->dma);
+ ret = cpsw_update_channels(priv, chs);
+ if (ret)
+ goto err;
+
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ /* Inform stack about new count of queues */
+ ret = netif_set_real_num_tx_queues(slave->ndev,
+ cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err;
+ }
+
+ ret = netif_set_real_num_rx_queues(slave->ndev,
+ cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err;
+ }
+
+ /* Enable rx packets handling */
+ netif_dormant_off(slave->ndev);
+ }
+
+ if (cpsw_common_res_usage_state(cpsw)) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ goto err;
+
+ /* After this, receive is started */
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+ netif_tx_start_all_queues(slave->ndev);
+ }
+ return 0;
+err:
+ dev_err(priv->dev, "cannot update channels number, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1982,14 +2260,16 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_regs = cpsw_get_regs,
.begin = cpsw_ethtool_op_begin,
.complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
};
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
- void __iomem *regs = priv->regs;
+ void __iomem *regs = cpsw->regs;
int slave_num = slave->slave_num;
- struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
+ struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num;
slave->data = data;
slave->regs = regs + slave_reg_ofs;
@@ -2160,71 +2440,50 @@ no_phy_slave:
return 0;
}
-static int cpsw_probe_dual_emac(struct platform_device *pdev,
- struct cpsw_priv *priv)
+static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
- struct cpsw_platform_data *data = &priv->data;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_platform_data *data = &cpsw->data;
struct net_device *ndev;
struct cpsw_priv *priv_sl2;
- int ret = 0, i;
+ int ret = 0;
- ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
if (!ndev) {
- dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
+ dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
}
priv_sl2 = netdev_priv(ndev);
- priv_sl2->data = *data;
- priv_sl2->pdev = pdev;
+ priv_sl2->cpsw = cpsw;
priv_sl2->ndev = ndev;
priv_sl2->dev = &ndev->dev;
priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
- priv_sl2->rx_packet_max = max(rx_packet_max, 128);
if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
ETH_ALEN);
- dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
+ priv_sl2->mac_addr);
} else {
random_ether_addr(priv_sl2->mac_addr);
- dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
+ priv_sl2->mac_addr);
}
memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
- priv_sl2->slaves = priv->slaves;
- priv_sl2->clk = priv->clk;
-
- priv_sl2->coal_intvl = 0;
- priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
-
- priv_sl2->regs = priv->regs;
- priv_sl2->host_port_regs = priv->host_port_regs;
- priv_sl2->wr_regs = priv->wr_regs;
- priv_sl2->hw_stats = priv->hw_stats;
- priv_sl2->dma = priv->dma;
- priv_sl2->txch = priv->txch;
- priv_sl2->rxch = priv->rxch;
- priv_sl2->ale = priv->ale;
priv_sl2->emac_port = 1;
- priv->slaves[1].ndev = ndev;
- priv_sl2->cpts = priv->cpts;
- priv_sl2->version = priv->version;
-
- for (i = 0; i < priv->num_irqs; i++) {
- priv_sl2->irqs_table[i] = priv->irqs_table[i];
- priv_sl2->num_irqs = priv->num_irqs;
- }
+ cpsw->slaves[1].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
/* register the network device */
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, cpsw->dev);
ret = register_netdev(ndev);
if (ret) {
- dev_err(&pdev->dev, "cpsw: error registering net device\n");
+ dev_err(cpsw->dev, "cpsw: error registering net device\n");
free_netdev(ndev);
ret = -ENODEV;
}
@@ -2272,6 +2531,7 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
static int cpsw_probe(struct platform_device *pdev)
{
+ struct clk *clk;
struct cpsw_platform_data *data;
struct net_device *ndev;
struct cpsw_priv *priv;
@@ -2282,10 +2542,14 @@ static int cpsw_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
struct gpio_descs *mode;
u32 slave_offset, sliver_offset, slave_size;
+ struct cpsw_common *cpsw;
int ret = 0, i;
int irq;
- ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+ cpsw->dev = &pdev->dev;
+
+ ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(&pdev->dev, "error allocating net_device\n");
return -ENOMEM;
@@ -2293,13 +2557,13 @@ static int cpsw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
- priv->pdev = pdev;
+ priv->cpsw = cpsw;
priv->ndev = ndev;
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
- priv->rx_packet_max = max(rx_packet_max, 128);
- priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- if (!priv->cpts) {
+ cpsw->rx_packet_max = max(rx_packet_max, 128);
+ cpsw->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ if (!cpsw->cpts) {
dev_err(&pdev->dev, "error allocating cpts\n");
ret = -ENOMEM;
goto clean_ndev_ret;
@@ -2320,12 +2584,14 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&priv->data, pdev)) {
+ if (cpsw_probe_dt(&cpsw->data, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
- data = &priv->data;
+ data = &cpsw->data;
+ cpsw->rx_ch_num = 1;
+ cpsw->tx_ch_num = 1;
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
@@ -2337,27 +2603,26 @@ static int cpsw_probe(struct platform_device *pdev)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
- priv->slaves = devm_kzalloc(&pdev->dev,
+ cpsw->slaves = devm_kzalloc(&pdev->dev,
sizeof(struct cpsw_slave) * data->slaves,
GFP_KERNEL);
- if (!priv->slaves) {
+ if (!cpsw->slaves) {
ret = -ENOMEM;
goto clean_runtime_disable_ret;
}
for (i = 0; i < data->slaves; i++)
- priv->slaves[i].slave_num = i;
+ cpsw->slaves[i].slave_num = i;
- priv->slaves[0].ndev = ndev;
+ cpsw->slaves[0].ndev = ndev;
priv->emac_port = 0;
- priv->clk = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(priv->clk)) {
+ clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
- priv->coal_intvl = 0;
- priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
@@ -2365,7 +2630,7 @@ static int cpsw_probe(struct platform_device *pdev)
ret = PTR_ERR(ss_regs);
goto clean_runtime_disable_ret;
}
- priv->regs = ss_regs;
+ cpsw->regs = ss_regs;
/* Need to enable clocks with runtime PM api to access module
* registers
@@ -2375,24 +2640,24 @@ static int cpsw_probe(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
goto clean_runtime_disable_ret;
}
- priv->version = readl(&priv->regs->id_ver);
+ cpsw->version = readl(&cpsw->regs->id_ver);
pm_runtime_put_sync(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->wr_regs)) {
- ret = PTR_ERR(priv->wr_regs);
+ cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cpsw->wr_regs)) {
+ ret = PTR_ERR(cpsw->wr_regs);
goto clean_runtime_disable_ret;
}
memset(&dma_params, 0, sizeof(dma_params));
memset(&ale_params, 0, sizeof(ale_params));
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_1:
- priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
- priv->hw_stats = ss_regs + CPSW1_HW_STATS;
+ cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -2404,9 +2669,9 @@ static int cpsw_probe(struct platform_device *pdev)
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
- priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
- priv->hw_stats = ss_regs + CPSW2_HW_STATS;
+ cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ cpsw->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -2417,13 +2682,14 @@ static int cpsw_probe(struct platform_device *pdev)
(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
break;
default:
- dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
+ dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
- for (i = 0; i < priv->data.slaves; i++) {
- struct cpsw_slave *slave = &priv->slaves[i];
- cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+
+ cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
slave_offset += slave_size;
sliver_offset += SLIVER_SIZE;
}
@@ -2443,19 +2709,16 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.has_ext_regs = true;
dma_params.desc_hw_addr = dma_params.desc_mem_phys;
- priv->dma = cpdma_ctlr_create(&dma_params);
- if (!priv->dma) {
+ cpsw->dma = cpdma_ctlr_create(&dma_params);
+ if (!cpsw->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
goto clean_runtime_disable_ret;
}
- priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
- cpsw_tx_handler);
- priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
- cpsw_rx_handler);
-
- if (WARN_ON(!priv->txch || !priv->rxch)) {
+ cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) {
dev_err(priv->dev, "error initializing dma channels\n");
ret = -ENOMEM;
goto clean_dma_ret;
@@ -2466,8 +2729,8 @@ static int cpsw_probe(struct platform_device *pdev)
ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = data->slaves;
- priv->ale = cpsw_ale_create(&ale_params);
- if (!priv->ale) {
+ cpsw->ale = cpsw_ale_create(&ale_params);
+ if (!cpsw->ale) {
dev_err(priv->dev, "error initializing ale engine\n");
ret = -ENODEV;
goto clean_dma_ret;
@@ -2484,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (of_id) {
pdev->id_entry = of_id->data;
if (pdev->id_entry->driver_data)
- priv->quirk_irq = true;
+ cpsw->quirk_irq = true;
}
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
@@ -2502,9 +2765,9 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- priv->irqs_table[0] = irq;
+ cpsw->irqs_table[0] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
- 0, dev_name(&pdev->dev), priv);
+ 0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
goto clean_ale_ret;
@@ -2517,21 +2780,20 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- priv->irqs_table[1] = irq;
+ cpsw->irqs_table[1] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
- 0, dev_name(&pdev->dev), priv);
+ 0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
goto clean_ale_ret;
}
- priv->num_irqs = 2;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2545,8 +2807,8 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
&ss_res->start, ndev->irq);
- if (priv->data.dual_emac) {
- ret = cpsw_probe_dual_emac(pdev, priv);
+ if (cpsw->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
goto clean_ale_ret;
@@ -2556,9 +2818,9 @@ static int cpsw_probe(struct platform_device *pdev)
return 0;
clean_ale_ret:
- cpsw_ale_destroy(priv->ale);
+ cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
- cpdma_ctlr_destroy(priv->dma);
+ cpdma_ctlr_destroy(cpsw->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
@@ -2569,7 +2831,7 @@ clean_ndev_ret:
static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
@@ -2578,17 +2840,17 @@ static int cpsw_remove(struct platform_device *pdev)
return ret;
}
- if (priv->data.dual_emac)
- unregister_netdev(cpsw_get_slave_ndev(priv, 1));
+ if (cpsw->data.dual_emac)
+ unregister_netdev(cpsw->slaves[1].ndev);
unregister_netdev(ndev);
- cpsw_ale_destroy(priv->ale);
- cpdma_ctlr_destroy(priv->dma);
+ cpsw_ale_destroy(cpsw->ale);
+ cpdma_ctlr_destroy(cpsw->dma);
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (priv->data.dual_emac)
- free_netdev(cpsw_get_slave_ndev(priv, 1));
+ if (cpsw->data.dual_emac)
+ free_netdev(cpsw->slaves[1].ndev);
free_netdev(ndev);
return 0;
}
@@ -2598,14 +2860,14 @@ static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (netif_running(priv->slaves[i].ndev))
- cpsw_ndo_stop(priv->slaves[i].ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (netif_running(cpsw->slaves[i].ndev))
+ cpsw_ndo_stop(cpsw->slaves[i].ndev);
}
} else {
if (netif_running(ndev))
@@ -2613,7 +2875,7 @@ static int cpsw_suspend(struct device *dev)
}
/* Select sleep pin state */
- pinctrl_pm_select_sleep_state(&pdev->dev);
+ pinctrl_pm_select_sleep_state(dev);
return 0;
}
@@ -2622,17 +2884,17 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
+ pinctrl_pm_select_default_state(dev);
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (netif_running(priv->slaves[i].ndev))
- cpsw_ndo_open(priv->slaves[i].ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (netif_running(cpsw->slaves[i].ndev))
+ cpsw_ndo_open(cpsw->slaves[i].ndev);
}
} else {
if (netif_running(ndev))
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 19e5f32a8a64..c3f35f11a8fd 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -86,7 +86,7 @@ struct cpdma_desc_pool {
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
- int num_desc, used_desc;
+ int num_desc;
struct device *dev;
struct gen_pool *gen_pool;
};
@@ -104,6 +104,7 @@ struct cpdma_ctlr {
struct cpdma_desc_pool *pool;
spinlock_t lock;
struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+ int chan_num;
};
struct cpdma_chan {
@@ -123,6 +124,13 @@ struct cpdma_chan {
int int_set, int_clear, td;
};
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
+
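
These macros (moved here from davinci_cpdma.h, see the hunk at the end of this diff) encode the channel direction into the controller-wide slot index. A small illustration of the mapping, assuming CPDMA_MAX_CHANNELS is BITS_PER_LONG, i.e. 64 on a 64-bit build:

#include <stdio.h>

#define CPDMA_MAX_CHANNELS	64	/* BITS_PER_LONG on a 64-bit build */
#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))

int main(void)
{
	/* rx channel 3 is stored at slot 67 but is still "linear" channel 3 */
	printf("tx 3 -> slot %d\n", tx_chan_num(3));	/* prints 3  */
	printf("rx 3 -> slot %d\n", rx_chan_num(3));	/* prints 67 */
	printf("linear(%d) -> %d\n", rx_chan_num(3),
	       __chan_linear(rx_chan_num(3)));		/* prints 3  */
	return 0;
}
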
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs params.dmaregs
#define num_chan params.num_chan
@@ -148,7 +156,10 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
if (!pool)
return;
- WARN_ON(pool->used_desc);
+ WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+ "cpdma_desc_pool size %d != avail %d",
+ gen_pool_size(pool->gen_pool),
+ gen_pool_avail(pool->gen_pool));
if (pool->cpumap)
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
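
The replaced used_desc counter is redundant once allocation goes through the gen_pool API: gen_pool_size() minus gen_pool_avail() already yields the number of outstanding descriptor bytes, so the WARN above catches leaked descriptors at pool teardown without any manual bookkeeping.
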
@@ -232,21 +243,14 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
- struct cpdma_desc __iomem *desc = NULL;
-
- desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
- pool->desc_size);
- if (desc)
- pool->used_desc++;
-
- return desc;
+ return (struct cpdma_desc __iomem *)
+ gen_pool_alloc(pool->gen_pool, pool->desc_size);
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc)
{
gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
- pool->used_desc--;
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->state = CPDMA_STATE_IDLE;
ctlr->params = *params;
ctlr->dev = params->dev;
+ ctlr->chan_num = 0;
spin_lock_init(&ctlr->lock);
ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
@@ -336,12 +341,14 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
}
ctlr->state = CPDMA_STATE_TEARDOWN;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_stop(ctlr->channels[i]);
}
+ spin_lock_irqsave(&ctlr->lock, flags);
dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
@@ -403,13 +410,52 @@ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
+{
+ return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
+
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
+{
+ return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
+
+/**
+ * cpdma_chan_split_pool - Splits ctlr descriptor pool between all channels.
+ * Has to be called under ctlr lock.
+ */
+static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+{
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ struct cpdma_chan *chan;
+ int ch_desc_num;
+ int i;
+
+ if (!ctlr->chan_num)
+ return;
+
+ /* calculate average size of pool slice */
+ ch_desc_num = pool->num_desc / ctlr->chan_num;
+
+ /* split ctlr pool */
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ chan = ctlr->channels[i];
+ if (chan)
+ chan->desc_num = ch_desc_num;
+ }
+}
+
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
- cpdma_handler_fn handler)
+ cpdma_handler_fn handler, int rx_type)
{
+ int offset = chan_num * 4;
struct cpdma_chan *chan;
- int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
unsigned long flags;
+ chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
+
if (__chan_linear(chan_num) >= ctlr->num_chan)
return NULL;
@@ -451,14 +497,25 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
spin_lock_init(&chan->lock);
ctlr->channels[chan_num] = chan;
+ ctlr->chan_num++;
+
+ cpdma_chan_split_pool(ctlr);
+
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
- return ctlr->pool->num_desc / 2;
+ unsigned long flags;
+ int desc_num;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ desc_num = chan->desc_num;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
@@ -475,6 +532,10 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
if (chan->state != CPDMA_STATE_IDLE)
cpdma_chan_stop(chan);
ctlr->channels[chan->chan_num] = NULL;
+ ctlr->chan_num--;
+
+ cpdma_chan_split_pool(ctlr);
+
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
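The new cpdma_chan_split_pool() replaces the old fixed half-pool split: each time a channel is created or destroyed the descriptor pool is re-divided evenly among however many channels exist, and cpdma_chan_get_rx_buf_num() now reports the per-channel slice. A minimal userspace model of that arithmetic; the model_* names are invented, and the real code walks ctlr->channels[] under ctlr->lock.

#include <stdio.h>

#define MODEL_MAX_CHANNELS 8

struct model_chan { int in_use; int desc_num; };

struct model_ctlr {
	int num_desc;		/* total descriptors in the pool */
	int chan_num;		/* live channel count */
	struct model_chan channels[MODEL_MAX_CHANNELS];
};

/* mirrors cpdma_chan_split_pool(): every live channel gets an equal slice */
static void model_split_pool(struct model_ctlr *ctlr)
{
	int slice, i;

	if (!ctlr->chan_num)
		return;
	slice = ctlr->num_desc / ctlr->chan_num;
	for (i = 0; i < MODEL_MAX_CHANNELS; i++)
		if (ctlr->channels[i].in_use)
			ctlr->channels[i].desc_num = slice;
}

int main(void)
{
	struct model_ctlr ctlr = { .num_desc = 256 };
	int i;

	/* one TX and one RX channel: 128 descriptors each, i.e. the old
	 * hard-coded num_desc / 2 result falls out as a special case */
	for (i = 0; i < 2; i++) {
		ctlr.channels[i].in_use = 1;
		ctlr.chan_num++;
		model_split_pool(&ctlr);
	}
	for (i = 0; i < 2; i++)
		printf("chan %d: %d descriptors\n", i, ctlr.channels[i].desc_num);
	return 0;
}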
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 4b46cd6e9a3f..a07b22b12bc1 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -17,13 +17,6 @@
#define CPDMA_MAX_CHANNELS BITS_PER_LONG
-#define tx_chan_num(chan) (chan)
-#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
-#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
-#define is_tx_chan(chan) (!is_rx_chan(chan))
-#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
-#define chan_linear(chan) __chan_linear((chan)->chan_num)
-
#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
#define CPDMA_EOI_RX_THRESH 0x0
@@ -79,8 +72,8 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
- cpdma_handler_fn handler);
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
+ cpdma_handler_fn handler, int rx_type);
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
@@ -94,6 +87,8 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
enum cpdma_control {
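The two helpers added to this header read the masked RX/TX interrupt status registers, so each set bit in the returned u32 should correspond to one pending channel in the same linear numbering that __chan_linear() produces. A userspace sketch of the walk a completion handler might do over such a mask; the mask value below is made up.

#include <stdio.h>
#include <stdint.h>

/* iterate the set bits of a pending-channels mask, lowest channel first */
static void service_pending(uint32_t pending)
{
	int ch;

	for (ch = 0; pending; ch++, pending >>= 1)
		if (pending & 1)
			printf("servicing channel %d\n", ch);
}

int main(void)
{
	service_pending(0x00000005);	/* channels 0 and 2 pending */
	return 0;
}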
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 727a79f3c7dd..2fd94a5bc1f3 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -597,14 +597,14 @@ static u32 hash_get(u8 *addr)
}
/**
- * hash_add - Hash function to add mac addr from hash table
+ * emac_hash_add - Hash function to add mac addr to hash table
* @priv: The DaVinci EMAC private adapter structure
* @mac_addr: mac address to add to hash table
*
* Adds mac address to the internal hash table
*
*/
-static int hash_add(struct emac_priv *priv, u8 *mac_addr)
+static int emac_hash_add(struct emac_priv *priv, u8 *mac_addr)
{
struct device *emac_dev = &priv->ndev->dev;
u32 rc = 0;
@@ -613,7 +613,7 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
if (netif_msg_drv(priv)) {
- dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\
+ dev_err(emac_dev, "DaVinci EMAC: emac_hash_add(): Invalid "\
"Hash %08x, should not be greater than %08x",
hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
}
@@ -639,14 +639,14 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
}
/**
- * hash_del - Hash function to delete mac addr from hash table
+ * emac_hash_del - Hash function to delete mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
* @mac_addr: mac address to delete from hash table
*
* Removes mac address from the internal hash table
*
*/
-static int hash_del(struct emac_priv *priv, u8 *mac_addr)
+static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr)
{
u32 hash_value;
u32 hash_bit;
@@ -696,10 +696,10 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
switch (action) {
case EMAC_MULTICAST_ADD:
- update = hash_add(priv, mac_addr);
+ update = emac_hash_add(priv, mac_addr);
break;
case EMAC_MULTICAST_DEL:
- update = hash_del(priv, mac_addr);
+ update = emac_hash_del(priv, mac_addr);
break;
case EMAC_ALL_MULTI_SET:
update = 1;
@@ -1870,10 +1870,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
goto no_pdata;
}
- priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
- emac_tx_handler);
- priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
- emac_rx_handler);
+ priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
+ emac_tx_handler, 0);
+ priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+ emac_rx_handler, 1);
if (WARN_ON(!priv->txchan || !priv->rxchan)) {
rc = -ENOMEM;
goto no_cpdma_chan;
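The renamed emac_hash_add()/emac_hash_del() maintain a 64-bin multicast hash (EMAC_NUM_MULTICAST_BITS) with a per-bin reference count, so a bin is only cleared once the last address hashing into it is removed, and the return value tells the caller whether the hardware registers need rewriting. A self-contained userspace model of that bookkeeping; the 6-bit hash function itself is not reproduced here, bin numbers are passed in directly.

#include <stdio.h>
#include <stdint.h>

#define NUM_BINS 64	/* EMAC_NUM_MULTICAST_BITS in the driver */

static uint64_t hash_table;		/* models the MACHASH1/MACHASH2 pair */
static unsigned int bin_refs[NUM_BINS];

/* returns 1 when the hardware table needs rewriting, like emac_hash_add() */
static int model_hash_add(unsigned int bin)
{
	if (bin_refs[bin]++ == 0) {
		hash_table |= 1ULL << bin;
		return 1;
	}
	return 0;
}

/* only clears the bin once the last address hashing into it is removed */
static int model_hash_del(unsigned int bin)
{
	if (bin_refs[bin] && --bin_refs[bin] == 0) {
		hash_table &= ~(1ULL << bin);
		return 1;
	}
	return 0;
}

int main(void)
{
	model_hash_add(9);
	model_hash_add(9);	/* second address in the same bin: no rewrite */
	model_hash_del(9);	/* bin still referenced: no rewrite */
	printf("table %#llx, rewrite on last del: %d\n",
	       (unsigned long long)hash_table, model_hash_del(9));
	return 0;
}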
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 79f0ec4e51ac..bc258d7e41df 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1791,7 +1791,7 @@ fail_alloc_rx:
gelic_card_free_chain(card, card->tx_chain.head);
fail_alloc_tx:
free_irq(card->irq, card);
- netdev->irq = NO_IRQ;
+ netdev->irq = 0;
fail_request_irq:
ps3_sb_event_receive_port_destroy(dev, card->irq);
fail_alloc_irq:
@@ -1843,7 +1843,7 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
netdev0 = card->netdev[GELIC_PORT_ETHERNET_0];
/* disconnect event port */
free_irq(card->irq, card);
- netdev0->irq = NO_IRQ;
+ netdev0->irq = 0;
ps3_sb_event_receive_port_destroy(card->dev, card->irq);
wait_event(card->waitq,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index f38696ceee74..908e72e18ef7 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1724,24 +1724,21 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
struct velocity_td_info *tdinfo, struct tx_desc *td)
{
struct sk_buff *skb = tdinfo->skb;
+ int i;
/*
* Don't unmap the pre-allocated tx_bufs
*/
- if (tdinfo->skb_dma) {
- int i;
+ for (i = 0; i < tdinfo->nskb_dma; i++) {
+ size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
- for (i = 0; i < tdinfo->nskb_dma; i++) {
- size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+ /* For scatter-gather */
+ if (skb_shinfo(skb)->nr_frags > 0)
+ pktlen = max_t(size_t, pktlen,
+ td->td_buf[i].size & ~TD_QUEUE);
- /* For scatter-gather */
- if (skb_shinfo(skb)->nr_frags > 0)
- pktlen = max_t(size_t, pktlen,
- td->td_buf[i].size & ~TD_QUEUE);
-
- dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
- le16_to_cpu(pktlen), DMA_TO_DEVICE);
- }
+ dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+ le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
dev_kfree_skb_irq(skb);
tdinfo->skb = NULL;
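The via-velocity change above only flattens the loop (tdinfo->skb_dma is an array member, so the removed NULL check was always true); the unmap-length rule itself is unchanged: pad short frames to ETH_ZLEN, and for scatter-gather frames take at least the descriptor's buffer size with the TD_QUEUE flag masked off. A userspace sketch of that computation, with the TD_QUEUE value assumed for illustration and the driver's le16_to_cpu() wrapping dropped since the model works in host order.

#include <stdio.h>
#include <stddef.h>

#define MODEL_ETH_ZLEN	60	/* minimum Ethernet frame length */
#define MODEL_TD_QUEUE	0x8000	/* illustrative flag bit in the size field */

static size_t unmap_len(size_t skb_len, int nr_frags, unsigned int td_size)
{
	/* short frames are padded on the wire, so unmap the padded length */
	size_t pktlen = skb_len > MODEL_ETH_ZLEN ? skb_len : MODEL_ETH_ZLEN;

	/* for scatter-gather, each segment is at least the descriptor's
	 * queued buffer size with the queue flag masked off */
	if (nr_frags > 0) {
		size_t seg = td_size & ~MODEL_TD_QUEUE;

		if (seg > pktlen)
			pktlen = seg;
	}
	return pktlen;
}

int main(void)
{
	printf("%zu\n", unmap_len(42, 0, 0));				/* 60 */
	printf("%zu\n", unmap_len(42, 1, MODEL_TD_QUEUE | 100));	/* 100 */
	return 0;
}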
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 4f5c024c6192..6d68c8a8f4f2 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,7 +18,7 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ)
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
select PHYLIB
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 36ee7ab300ae..69e2a833a84f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1297,7 +1297,7 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
-static struct ethtool_ops axienet_ethtool_ops = {
+static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,